| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81 to 54k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
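Each row of this table pairs a `code` snippet with a `style_context` snippet, tags each with an integer style id, and assigns a binary `label`. As a minimal sketch of how rows with this schema could be read with the `datasets` library (the repository id below is a placeholder, not a confirmed Hub path):

from datasets import load_dataset

# Placeholder repository id: substitute the dataset's actual Hub path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])           # raw source snippet, 81 to 54k chars
print(row["code_codestyle"])       # integer style id in [0, 721]
print(row["style_context"][:200])  # companion snippet providing style context
print(row["label"])                # binary label, 0 or 1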
import tempfile
import unittest

import numpy as np

from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionPipeline,
    PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
(code_codestyle: 54; the next block is this row's style_context)
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
(style_context_codestyle: 54, label: 1; end of row)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
(code_codestyle: 54; the next block is this row's style_context)
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
(style_context_codestyle: 54, label: 1; end of row)
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
(code_codestyle: 54; the next block is this row's style_context)
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        link_unchanged_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
(style_context_codestyle: 54, label: 1; end of row)
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
(code_codestyle: 54; the next block is this row's style_context)
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
(style_context_codestyle: 54, label: 1; end of row)
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
(code_codestyle: 54; the next block is this row's style_context)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
(style_context_codestyle: 54, label: 1; end of row)
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
(code_codestyle: 54; the next block is this row's style_context)
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
(style_context_codestyle: 54, label: 1; end of row)
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> int:
_A : Any = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
_A : Optional[int] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non-filtered indices as noted above
_A : int = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non-filtered values as noted above
_A : Union[str, Any] = tf_top_k_top_p_filtering(_a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
_A : Tuple = output[output != -float("""inf""" )]
_A : List[Any] = tf.cast(
tf.where(tf.not_equal(_a , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_a , _a , rtol=1e-12 )
tf.debugging.assert_equal(_a , _a )
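# What the filter above does (for reference): top_k=10 keeps only the 10
# largest logits per row, top_p=0.6 then keeps the smallest set of tokens whose
# cumulative softmax probability reaches 0.6, and min_tokens_to_keep=4
# guarantees at least 4 tokens survive per row; all removed logits become -inf.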
@require_tf
class lowercase ( unittest.TestCase,UpperCamelCase__ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
_a = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def a__ ( self ) -> List[Any]:
# TF-only test: tf.saved_model export
_A : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_A : int = 2
_A : int = 2
class lowercase ( tf.Module ):
def __init__( self , _a ) -> List[str]:
super(_a , self ).__init__()
_A : int = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_a , )
def a__ ( self , _a , _a ) -> Any:
_A : Any = self.model.generate(
input_ids=_a , attention_mask=_a , max_new_tokens=_a , return_dict_in_generate=_a , )
return {"sequences": outputs["sequences"]}
_A : int = [[2, 0], [102, 103]]
_A : Any = [[1, 0], [1, 1]]
_A : Optional[int] = DummyModel(model=_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_a , _a , signatures={"""serving_default""": dummy_model.serving} )
_A : Union[str, Any] = tf.saved_model.load(_a ).signatures["""serving_default"""]
for batch_size in range(1 , len(_a ) + 1 ):
_A : List[Any] = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
_A : List[Any] = serving_func(**_a )["""sequences"""]
_A : List[Any] = test_model.generate(**_a , max_new_tokens=_a )
tf.debugging.assert_equal(_a , _a )
@slow
def a__ ( self ) -> str:
# TF-only test: tf.saved_model export
_A : Tuple = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_A : Union[str, Any] = 1
_A : str = 2
class lowercase ( tf.Module ):
def __init__( self , _a ) -> str:
super(_a , self ).__init__()
_A : Optional[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_a , )
def a__ ( self , _a , _a ) -> Any:
_A : Tuple = self.model.generate(
input_ids=_a , attention_mask=_a , max_new_tokens=_a , return_dict_in_generate=_a , )
return {"sequences": outputs["sequences"]}
_A : Optional[Any] = [[2], [102, 103]]
_A : List[Any] = [[1], [1, 1]]
_A : List[Any] = DummyModel(model=_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_a , _a , signatures={"""serving_default""": dummy_model.serving} )
_A : Optional[int] = tf.saved_model.load(_a ).signatures["""serving_default"""]
for input_row in range(len(_a ) ):
_A : List[str] = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
_A : int = serving_func(**_a )["""sequences"""]
_A : str = test_model.generate(**_a , max_new_tokens=_a )
tf.debugging.assert_equal(_a , _a )
@slow
@require_tensorflow_text
def a__ ( self ) -> Tuple:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=_a )
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ) -> Tuple:
super().__init__()
_A : str = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_a , """spiece.model""" ) , """rb""" ).read() )
_A : str = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def a__ ( self , _a , *_a , **_a ) -> str:
_A : Any = self.tokenizer.tokenize(_a )
_A , _A : Any = text.pad_model_inputs(
_a , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
_A : Any = self.model.generate(input_ids=_a , attention_mask=_a )
return self.tokenizer.detokenize(_a )
_A : Optional[int] = CompleteSentenceTransformer()
_A : Dict = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
_A : List[str] = complete_model(_a )
_A : List[Any] = tf.keras.Model(_a , _a )
keras_model.save(_a )
def a__ ( self ) -> Optional[Any]:
# Has PT equivalent: this test relies on random sampling
_A : Optional[int] = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
_A : Any = 14
_A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_A : str = """Hello, my dog is cute and"""
_A : Optional[Any] = tokenizer(_a , return_tensors="""tf""" )
_A : List[str] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
_A : Optional[Any] = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
_A : Dict = model.generate(**_a , eos_token_id=_a , **_a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
_A : int = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
_A : int = model.generate(**_a , eos_token_id=_a , **_a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def a__ ( self ) -> Optional[Any]:
# Has PT equivalent: ample use of framework-specific code
_A : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
_A : Tuple = """Hugging Face is a technology company based in New York and Paris."""
_A : Union[str, Any] = bart_tokenizer(_a , return_tensors="""tf""" ).input_ids
_A : Dict = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
_A : Optional[Any] = bart_model.generate(_a ).numpy()
class lowercase ( UpperCamelCase__ ):
def a__ ( self , _a , _a=None , **_a ) -> Dict:
return super().call(_a , **_a )
_A : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
_A : List[str] = bart_model.generate(_a , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(_a , _a ) )
class lowercase ( bart_model.model.encoder.__class__ ):
def a__ ( self , _a , **_a ) -> Union[str, Any]:
return super().call(_a , **_a )
_A : Optional[int] = FakeEncoder(bart_model.config , bart_model.model.shared )
_A : Optional[int] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_A : Optional[int] = bart_model.generate(_a ).numpy()
with self.assertRaises(_a ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_a , foo="""bar""" )
| 54
|
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # z > 0 requires a > d, and n > 0 requires a < 4d
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
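# Derivation sketch (a = first_term is the middle term of the progression, so
# x = a + d and z = a - d):
#   x**2 - a**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a) = n
# hence 4 * d = a + n / a, which is exactly what the inner loop computes before
# the divisibility-by-4 check; z > 0 gives a > d and n > 0 gives a < 4 * d.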
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
| 1
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_ ):
_A : int = len(snake_case_ ) // 2
# choose the middle 3 elements
_A : int = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
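# Illustrative trace (assumes the input is unimodal, i.e. strictly increasing
# then strictly decreasing): peak([1, 3, 4, 5, 2]) first recurses on the right
# half because 3 < 5, then the middle window [4, 5, 2] reveals the peak 5.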
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_snake_case = "\\n Text data.\n Second line of data."
_snake_case = "file"
@pytest.fixture(scope="""session""" )
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
_A : Tuple = bytes(snake_case_,"""utf-8""" )
with zstd.open(snake_case_,"""wb""" ) as f:
f.write(snake_case_ )
return path
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ ):
with open(os.path.join(tmpfs.local_root_dir,snake_case_ ),"""w""" ) as f:
f.write(snake_case_ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""",["""gzip""", """xz""", """zstd"""] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Tuple = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
_A : Optional[int] = input_paths[compression_format]
_A : Any = tmp_path / """cache"""
_A : List[str] = DownloadConfig(cache_dir=snake_case_,extract_compressed_file=snake_case_ )
_A : Union[str, Any] = cached_path(snake_case_,download_config=snake_case_ )
with open(snake_case_ ) as f:
_A : List[Any] = f.read()
with open(snake_case_ ) as f:
_A : List[Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""",[True, False] )
@pytest.mark.parametrize("""default_cache_dir""",[True, False] )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : Union[str, Any] = """custom_cache"""
_A : Any = """custom_extracted_dir"""
_A : Union[str, Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
_A : int = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""",snake_case_ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""",str(snake_case_ ) )
_A : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_A : Dict = xz_file
_A : Optional[int] = (
DownloadConfig(extract_compressed_file=snake_case_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir,extract_compressed_file=snake_case_ )
)
_A : int = cached_path(snake_case_,download_config=snake_case_ )
assert Path(snake_case_ ).parent.parts[-2:] == expected
def lowerCAmelCase_ ( snake_case_ ):
# absolute path
_A : int = str(Path(snake_case_ ).resolve() )
assert cached_path(snake_case_ ) == text_file
# relative path
_A : str = str(Path(snake_case_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(snake_case_ ) == text_file
def lowerCAmelCase_ ( snake_case_ ):
# absolute path
_A : Union[str, Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(snake_case_ ):
cached_path(snake_case_ )
# relative path
_A : Tuple = """./__missing_file__.txt"""
with pytest.raises(snake_case_ ):
cached_path(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(snake_case_ ) as f:
_A : int = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""",snake_case_ )
def lowerCAmelCase_ ( ):
with pytest.raises(snake_case_ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""",snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Any = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(snake_case_ ):
http_get("""https://huggingface.co""",temp_file=snake_case_ )
with pytest.raises(snake_case_ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""",snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : str = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(snake_case_ ):
ftp_get("""ftp://huggingface.co""",temp_file=snake_case_ )
with pytest.raises(snake_case_ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""",snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(snake_case_ ):
fsspec_get("""s3://huggingface.co""",temp_file=snake_case_ )
with pytest.raises(snake_case_ ):
fsspec_head("""s3://huggingface.co""" )
| 54
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks a whole sentence while <mask_2> masks a single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 1
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=None ):
# Initialise PyTorch model
_A : Union[str, Any] = XLNetConfig.from_json_file(snake_case_ )
_A : List[str] = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
_A : str = finetuning_task
_A : int = GLUE_TASKS_NUM_LABELS[finetuning_task]
_A : int = XLNetForSequenceClassification(snake_case_ )
elif "squad" in finetuning_task:
_A : Optional[Any] = finetuning_task
_A : str = XLNetForQuestionAnswering(snake_case_ )
else:
_A : Optional[int] = XLNetLMHeadModel(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(snake_case_,snake_case_,snake_case_ )
# Save pytorch-model
_A : Union[str, Any] = os.path.join(snake_case_,snake_case_ )
_A : Any = os.path.join(snake_case_,snake_case_ )
print(f'''Save PyTorch model to {os.path.abspath(snake_case_ )}''' )
torch.save(model.state_dict(),snake_case_ )
print(f'''Save configuration file to {os.path.abspath(snake_case_ )}''' )
with open(snake_case_,"""w""",encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
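# Example invocation (illustrative script name and paths only):
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path ./xlnet_model.ckpt \
#   --xlnet_config_file ./xlnet_config.json \
#   --pytorch_dump_folder_path ./xlnet_pt \
#   --finetuning_task sts-b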
| 54
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
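# For reference: _LazyModule defers the heavy framework imports until an
# attribute is first accessed, and the try/except OptionalDependencyNotAvailable
# blocks above let each optional backend (vision, torch, TF) be skipped cleanly
# when it is not installed.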
| 54
| 1
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 54
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 54
| 1
|
def lowerCAmelCase_ ( snake_case_ = 200 ):
_A : Optional[Any] = [1, 2, 5, 10, 20, 50, 100, 200]
_A : List[str] = [0] * (pence + 1)
_A : int = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(snake_case_,pence + 1,1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
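# Worked check (illustrative): with coins {1, 2, 5} there are 4 ways to make
# 5 pence -- 1+1+1+1+1, 1+1+1+2, 1+2+2 and 5 -- and the table above counts
# exactly those, because iterating coins in the outer loop counts combinations
# rather than orderings.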
if __name__ == "__main__":
assert solution(200) == 73682
| 54
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = list(snake_case_ )
_A : List[Any] = list(snake_case_ )
_A : Tuple = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
_A : Optional[Any] = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = []
while True:
_A : int = ["""$"""] * len(snake_case_ )
_A : Any = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1,len(snake_case_ ) ):
_A : Tuple = compare_string(binary[i],binary[j] )
if k is False:
_A : str = """*"""
_A : str = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
_A : Dict = list(set(snake_case_ ) )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : List[str] = []
for minterm in minterms:
_A : Tuple = """"""
for _ in range(snake_case_ ):
_A : Optional[Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
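# Quick example (illustrative): with 3 variables, minterms [1, 5] become
# ["001", "101"] -- each minterm is peeled into bits from least to most
# significant and the string is built right-to-left.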
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Dict = list(snake_case_ )
_A : Tuple = list(snake_case_ )
_A : Dict = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
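# For reference (upstream the comparison reads list1[i] != list2[i]): an
# implicant covers a minterm exactly when all of its non-"_" bits match, i.e.
# the two strings differ in precisely as many positions as the implicant has
# "_" wildcards -- which is what the count_n == count comparison checks.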
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = []
_A : str = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
_A : Union[str, Any] = 0
_A : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
_A : Dict = j
if count == 1:
_A : int = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
_A : int = 0
temp.append(prime_implicants[i] )
while True:
_A : Optional[Any] = 0
_A : Tuple = -1
_A : List[Any] = 0
for i in range(len(snake_case_ ) ):
_A : List[str] = chart[i].count(1 )
if count_n > max_n:
_A : Optional[int] = count_n
_A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
_A : Optional[int] = 0
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Optional[int] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
_A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i],binary[j],snake_case_ ):
_A : Union[str, Any] = 1
return chart
def lowerCAmelCase_ ( ):
_A : Dict = int(input("""Enter the no. of variables\n""" ) )
_A : Dict = [
float(snake_case_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
_A : int = decimal_to_binary(snake_case_,snake_case_ )
_A : Optional[Any] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
_A : int = prime_implicant_chart(snake_case_,snake_case_ )
_A : int = selection(snake_case_,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 54
| 1
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if len(snake_case_ ) == 0:
return False
_A : str = len(snake_case_ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint],snake_case_ )
else:
return binary_search(a_list[midpoint + 1 :],snake_case_ )
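# Quick sanity checks (illustrative; the input list must already be sorted):
# binary_search([1, 3, 5, 7], 5) -> True   (the midpoint hits 5 immediately)
# binary_search([1, 3, 5, 7], 4) -> False  (the recursion bottoms out on [])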
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by comma:\n").strip()
_snake_case = [int(item.strip()) for item in user_input.split(",")]
_snake_case = int(input("Enter the number to be found in the list:\n").strip())
_snake_case = "" if binary_search(sequence, target) else "not "
print(f"""{target} was {not_str}found in {sequence}""")
| 54
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ = None ):
_A : Tuple = word_bank or []
# create a table
_A : int = len(snake_case_ ) + 1
_A : list[list[list[str]]] = []
for _ in range(snake_case_ ):
table.append([] )
# seed value
_A : Dict = [[]] # because the empty string has exactly one (empty) combination
# iterate through the indices
for i in range(snake_case_ ):
# only expand positions that are reachable from the start
if table[i] != []:
for word in word_bank:
# does the word match the target starting at position i?
if target[i : i + len(snake_case_ )] == word:
_A : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(snake_case_ )] += new_combinations
# combinations are built in reverse order, so reverse each for readable output
for combination in table[len(snake_case_ )]:
combination.reverse()
return table[len(snake_case_ )]
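# Illustrative trace: all_construct("ab", ["a", "b", "ab"]) returns
# [["ab"], ["a", "b"]] -- table[2] collects ["ab"] from index 0 and
# ["b", "a"] from index 1, and the final loop reverses each combination.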
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54
| 1
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_snake_case = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
_snake_case = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
_snake_case = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
_snake_case = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
_snake_case = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
_snake_case = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
_snake_case = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def lowerCAmelCase_ ( ):
_A , _A : int = randrange(len(snake_case_ ) ), randrange(len(snake_case_ ) )
_A : Dict = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
_A , _A : List[Any] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
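# How the expected result is picked above (for reference): (play >= oppo) +
# (play > oppo) evaluates to 0 when play < oppo ("Loss"), 1 when they are
# equal ("Tie"), and 2 when play > oppo ("Win").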
def lowerCAmelCase_ ( snake_case_ = 100 ):
return (generate_random_hand() for _ in range(snake_case_ ))
@pytest.mark.parametrize("""hand, expected""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
assert PokerHand(snake_case_ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
assert PokerHand(snake_case_ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : List[str] = PokerHand(snake_case_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
assert PokerHand(snake_case_ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
assert PokerHand(snake_case_ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""",snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
assert PokerHand(snake_case_ ).compare_with(PokerHand(snake_case_ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""",generate_random_hands() )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
assert PokerHand(snake_case_ ).compare_with(PokerHand(snake_case_ ) ) == expected
def lowerCAmelCase_ ( ):
_A : Any = [PokerHand(snake_case_ ) for hand in SORTED_HANDS]
_A : Any = poker_hands.copy()
shuffle(snake_case_ )
_A : Union[str, Any] = chain(sorted(snake_case_ ) )
for index, hand in enumerate(snake_case_ ):
assert hand == poker_hands[index]
def lowerCAmelCase_ ( ):
# Test that five high straights are compared correctly.
_A : List[Any] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=snake_case_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowerCAmelCase_ ( ):
# Multiple calls to the five_high_straight function should still return True
# and shouldn't mutate the list on any call after the first.
_A : str = PokerHand("""2C 4S AS 3D 5C""" )
_A : Union[str, Any] = True
_A : Optional[int] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowerCAmelCase_ ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
_A : List[str] = 0
_A : int = os.path.abspath(os.path.dirname(snake_case_ ) )
_A : Dict = os.path.join(snake_case_,"""poker_hands.txt""" )
with open(snake_case_ ) as file_hand:
for line in file_hand:
_A : Optional[Any] = line[:14].strip()
_A : List[str] = line[15:].strip()
_A , _A : Union[str, Any] = PokerHand(snake_case_ ), PokerHand(snake_case_ )
_A : Tuple = player.compare_with(snake_case_ )
if output == "Win":
answer += 1
assert answer == 376
| 54
|
import operator
def lowerCAmelCase_ ( snake_case_,snake_case_ = False,snake_case_ = None ):
_A : str = operator.lt if reverse else operator.gt
_A : Optional[Any] = solution or []
if not arr:
return solution
_A : Dict = [arr.pop(0 )]
for i, item in enumerate(snake_case_ ):
if _operator(snake_case_,sublist[-1] ):
sublist.append(snake_case_ )
arr.pop(snake_case_ )
# merging sublist into solution list
if not solution:
solution.extend(snake_case_ )
else:
while sublist:
_A : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(snake_case_ ):
if not _operator(snake_case_,snake_case_ ):
solution.insert(snake_case_,snake_case_ )
break
else:
solution.append(snake_case_ )
strand_sort(snake_case_,snake_case_,snake_case_ )
return solution
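# Illustrative first pass: for [4, 3, 5, 1, 2] the initial strand pulled out is
# [4, 5] (items greater than the strand's tail are popped as they are
# encountered), leaving [3, 1, 2] for the recursive call that merges the
# remaining strands into the solution.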
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 54
| 1
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return abs(snake_case_ ) if a == 0 else greatest_common_divisor(b % a,snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
while y: # --> when y=0 then loop will terminate and return x as final GCD.
_A , _A : Dict = y, x % y
return abs(snake_case_ )
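# Quick checks (illustrative) -- both variants agree:
# greatest_common_divisor(24, 40) -> 8   via (24, 40) -> (16, 24) -> (8, 16) -> (0, 8)
# gcd_by_iterative(24, 40)        -> 8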
def lowerCAmelCase_ ( ):
try:
_A : Tuple = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
_A : Any = int(nums[0] )
_A : int = int(nums[1] )
print(
f'''greatest_common_divisor({num_a}, {num_a}) = '''
f'''{greatest_common_divisor(snake_case_,snake_case_ )}''' )
print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(snake_case_,snake_case_ )}''' )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
| 54
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result; we just want to make sure
# it works, meaning the float16 tensor was cast back to float32
# for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# Input without a mask_token is not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
# If we use the most probable targets, and filter differently, we should still
# have the same results
_A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
# The target list contains duplicates, so the pipeline cannot return
# more results than the number of unique targets
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
| 54
| 1
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=3 , _a=None , ) -> str:
_A : Union[str, Any] = parent
_A : Union[str, Any] = batch_size
_A : Optional[int] = image_size
_A : Any = patch_size
_A : int = num_channels
_A : Tuple = is_training
_A : int = use_labels
_A : str = hidden_size
_A : Any = num_hidden_layers
_A : int = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Dict = hidden_act
_A : int = hidden_dropout_prob
_A : Union[str, Any] = attention_probs_dropout_prob
_A : Dict = type_sequence_label_size
_A : Optional[Any] = initializer_range
_A : List[str] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : Optional[int] = (image_size // patch_size) ** 2
_A : Any = num_patches + 1
def a__ ( self ) -> Any:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Tuple = None
if self.use_labels:
_A : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Any:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Optional[int]:
_A : Tuple = TFViTModel(config=_a )
_A : Optional[int] = model(_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image of a different size than the one specified in the config.
_A : List[Any] = self.image_size // 2
_A : Optional[Any] = pixel_values[:, :, :image_size, :image_size]
_A : Union[str, Any] = model(_a , interpolate_pos_encoding=_a , training=_a )
_A : Union[str, Any] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> Optional[Any]:
_A : Optional[Any] = self.type_sequence_label_size
_A : int = TFViTForImageClassification(_a )
_A : Tuple = model(_a , labels=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image of a different size than the one specified in the config.
_A : Tuple = self.image_size // 2
_A : int = pixel_values[:, :, :image_size, :image_size]
_A : str = model(_a , interpolate_pos_encoding=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Optional[int] = 1
_A : List[Any] = TFViTForImageClassification(_a )
_A : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : List[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : List[Any] = self.prepare_config_and_inputs()
_A , _A , _A : str = config_and_inputs
_A : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_a = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
_a = False
_a = False
_a = False
def a__ ( self ) -> Any:
_A : List[Any] = TFViTModelTester(self )
_A : Dict = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Any:
_A , _A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_A : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Layer ) )
def a__ ( self ) -> Union[str, Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[int] = model_class(_a )
_A : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Any:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Tuple:
_A : str = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_a )
def prepare_img( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Dict:
_A : List[Any] = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
_A : Tuple = self.default_image_processor
_A : Any = prepare_img()
_A : List[Any] = image_processor(images=_a , return_tensors="""tf""" )
# forward pass
_A : str = model(**_a )
# verify the logits
_A : List[str] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Dict = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _a , atol=1e-4 )
| 54
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
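        # expected ids follow the toy vocab order defined in setUp, e.g. "lo"=10,
        # "w"=2, "er</w>"=16, "n"=9, "e"=3, "<unk>"=20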
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 1
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
_snake_case = {"allegro/herbert-base-cased": 514}
_snake_case = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
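        # in the returned mask, 1 flags a special-token position ([CLS]/[SEP]) and 0
        # flags an ordinary sequence token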
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
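        # token type ids mark segment membership: 0 over [CLS] + sequence A + [SEP],
        # 1 over sequence B and its trailing [SEP]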
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 54
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
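        # the first branch below closes an issue once the stale bot was the last
        # commenter and a further 7 days passed with no activity; the second posts the
        # stale warning after 23+ days of inactivity. Issues younger than 30 days or
        # carrying an exempt label are skipped by both branches.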
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 54
| 1
|
class Graph :
    def __init__( self ):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ):
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ):
        edges = self.get_edges()
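        # get_edges() lists every undirected edge twice (once per direction); the
        # loop below drops the mirrored duplicates before re-weighting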
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
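        # after sorting, bump any non-increasing weight so that all edge weights end
        # up strictly distinct; Boruvka's step relies on a unique cheapest edge per
        # component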
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ):
        string = """"""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("""\n""" )
    def get_edges( self ):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind :
    def __init__( self ):
        self.parent = {}
        self.rank = {}
    def __len__( self ):
        return len(self.parent )
    def make_set( self , item ):
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find( self , item ):
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
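            # path compression: re-point the node directly at its root so that
            # subsequent find() calls stay near O(1)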
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
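    # union by rank: the shallower tree is attached under the deeper root, and a
    # rank only grows when two equal-rank trees merge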
    def union( self , item1 , item2 ):
        root1 = self.find(item1 )
        root2 = self.find(item2 )
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
    @staticmethod
    def boruvka( graph ):
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
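# a minimal usage sketch (illustrative, not part of the original module):
# g = Graph.build(edges=[["a", "b", 1], ["b", "c", 2], ["a", "c", 3]])
# g.distinct_weight()
# print(UnionFind.boruvka(g))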
| 54
|
from __future__ import annotations
class XORCipher :
    def __init__( self , key = 0 ) -> None:
        self.__key = key
    def encrypt( self , content , key ) -> list[str]:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self , content , key ) -> list[str]:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self , content , key = 0 ) -> str:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self , content , key = 0 ) -> str:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = """"""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
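    # note: XOR is its own inverse (x ^ k ^ k == x), which is why encrypt/decrypt and
    # encrypt_string/decrypt_string above share identical bodies -- applying the
    # "encryption" twice with the same key restores the original text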
    def encrypt_file( self , file , key = 0 ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self , file , key ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = tempfile.mkdtemp()
# fmt: off
_A : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
_A : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_A : Optional[Any] = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
_A : List[Any] = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_a , _a )
def a__ ( self , **_a ) -> int:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> List[str]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def a__ ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> List[str]:
_A : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_A : int = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ) -> Dict:
_A : Any = self.get_tokenizer()
_A : int = self.get_image_processor()
_A : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_A : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def a__ ( self ) -> int:
_A : List[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_A : Dict = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_A : Tuple = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def a__ ( self ) -> Optional[int]:
_A : List[Any] = self.get_image_processor()
_A : Optional[Any] = self.get_tokenizer()
_A : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : List[str] = self.prepare_image_inputs()
_A : List[str] = image_processor(_a , return_tensors="""np""" )
_A : Optional[Any] = processor(images=_a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a__ ( self ) -> List[Any]:
_A : Optional[Any] = self.get_image_processor()
_A : Optional[int] = self.get_tokenizer()
_A : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : Dict = """lower newer"""
_A : Tuple = processor(text=_a )
_A : Tuple = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a__ ( self ) -> int:
_A : List[str] = self.get_image_processor()
_A : Any = self.get_tokenizer()
_A : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : Tuple = """lower newer"""
_A : Any = self.prepare_image_inputs()
_A : List[str] = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def a__ ( self ) -> Union[str, Any]:
_A : Dict = self.get_image_processor()
_A : Optional[Any] = self.get_tokenizer()
_A : Dict = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A : int = processor.batch_decode(_a )
_A : Union[str, Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
_A : int = self.get_image_processor()
_A : Any = self.get_tokenizer()
_A : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_A : int = """lower newer"""
_A : Optional[int] = self.prepare_image_inputs()
_A : int = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
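# floats_list below builds a shape[0] x shape[1] nested list of uniform floats in
# [0, scale), defaulting to the shared module-level RNG above so that test inputs
# stay reproducible across runs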
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
# Tests that all calls wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 54
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Tuple:
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=_a , speech_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , )
def a__ ( self , _a = "auto" ) -> List[Any]:
if slice_size == "auto":
_A : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def a__ ( self ) -> Tuple:
self.enable_attention_slicing(_a )
@torch.no_grad()
def __call__( self , _a , _a=1_6000 , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ) -> int:
_A : Dict = self.speech_processor.feature_extractor(
_a , return_tensors="""pt""" , sampling_rate=_a ).input_features.to(self.device )
_A : Optional[int] = self.speech_model.generate(_a , max_length=48_0000 )
_A : Union[str, Any] = self.speech_processor.tokenizer.batch_decode(_a , skip_special_tokens=_a , normalize=_a )[
0
]
if isinstance(_a , _a ):
_A : Any = 1
elif isinstance(_a , _a ):
_A : Optional[int] = len(_a )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_a )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_a , _a ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(_a )}.''' )
# get prompt text embeddings
_A : List[Any] = self.tokenizer(
_a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
_A : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_A : List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_A : str = text_input_ids[:, : self.tokenizer.model_max_length]
_A : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_A , _A , _A : List[Any] = text_embeddings.shape
_A : Tuple = text_embeddings.repeat(1 , _a , 1 )
_A : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_A : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_A : List[str]
if negative_prompt is None:
_A : List[Any] = [""""""] * batch_size
elif type(_a ) is not type(_a ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(_a )} !='''
F''' {type(_a )}.''' )
elif isinstance(_a , _a ):
_A : str = [negative_prompt]
elif batch_size != len(_a ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(_a )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
_A : Optional[int] = negative_prompt
_A : Tuple = text_input_ids.shape[-1]
_A : Dict = self.tokenizer(
_a , padding="""max_length""" , max_length=_a , truncation=_a , return_tensors="""pt""" , )
_A : int = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_A : Optional[Any] = uncond_embeddings.shape[1]
_A : Dict = uncond_embeddings.repeat(1 , _a , 1 )
_A : str = uncond_embeddings.view(batch_size * num_images_per_prompt , _a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_A : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_A : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_A : List[str] = torch.randn(_a , generator=_a , device="""cpu""" , dtype=_a ).to(
self.device )
else:
_A : Dict = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_A : Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_A : int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_A : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : List[str] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_A : Optional[int] = {}
if accepts_eta:
_A : Dict = eta
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
_A : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A : Dict = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
_A : Optional[Any] = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform guidance
if do_classifier_free_guidance:
_A , _A : Dict = noise_pred.chunk(2 )
_A : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_A : str = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_a , _a , _a )
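        # 0.18215 is the latent scaling factor of the Stable Diffusion VAE; dividing
        # by it maps the denoised latents back into the range the VAE decoder expects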
_A : List[str] = 1 / 0.18215 * latents
_A : List[str] = self.vae.decode(_a ).sample
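        # the VAE decodes to [-1, 1]; shift and scale into [0, 1] before converting to images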
_A : int = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_A : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A : Dict = self.numpy_to_pil(_a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
| 54
|
def apply_table( inp , table ):
    res = """"""
    for i in table:
        res += inp[i - 1]
    return res
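# example (illustrative): table entries are 1-based indices into `inp`, so
# apply_table("abcd", [2, 4, 3, 1]) returns "bdca"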
def left_shift( data ):
    return data[1:] + data[0]
def xor( a , b ):
    res = """"""
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
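# example (illustrative): bit strings are compared position by position, so
# xor("1010", "0110") returns "1100"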
def apply_sbox( s , data ):
    row = int("""0b""" + data[0] + data[-1],2 )
    col = int("""0b""" + data[1:3],2 )
    return bin(s[row][col] )[2:]
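# the outer bits of `data` (first and last) select the S-box row and the two middle
# bits the column, e.g. for "1011": row = 0b11 = 3, col = 0b01 = 1
# `function` below is one Feistel round of simplified DES: the right half is
# expanded, XOR-ed with the round key, passed through both S-boxes and P4, then
# folded into the left half (it reads the global `p4_table` defined under __main__)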
def function( expansion , s0 , s1 , key , message ):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = """0""" * (2 - len(l )) + l  # noqa: E741
    r = """0""" * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a__ ( self ) -> Union[str, Any]:
_A : Dict = 1
_A : List[Any] = 3
_A : str = (32, 32)
_A : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
@property
def a__ ( self ) -> str:
torch.manual_seed(0 )
_A : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def a__ ( self ) -> int:
torch.manual_seed(0 )
_A : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def a__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_A : Union[str, Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_a )
@property
def a__ ( self ) -> int:
def extract(*_a , **_a ):
class lowercase :
def __init__( self ) -> Any:
_A : List[Any] = torch.ones([0] )
def a__ ( self , _a ) -> Optional[Any]:
self.pixel_values.to(_a )
return self
return Out()
return extract
def a__ ( self ) -> Tuple:
_A : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
_A : int = self.dummy_cond_unet
_A : List[Any] = PNDMScheduler(skip_prk_steps=_a )
_A : Optional[int] = self.dummy_vae
_A : Dict = self.dummy_text_encoder
_A : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_A : List[Any] = 77
_A : str = self.dummy_image.to(_a )
_A : Any = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_A : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_A : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
_A : Optional[int] = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_A : List[str] = """A painting of a squirrel eating a burger"""
_A : Union[str, Any] = torch.Generator(device=_a ).manual_seed(0 )
_A : Any = alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=_a , )
_A : Any = output.images
_A : int = torch.Generator(device=_a ).manual_seed(0 )
_A : Tuple = alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=_a , return_dict=_a , )[0]
_A : List[Any] = image[0, -3:, -3:, -1]
_A : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A : Dict = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def a__ ( self ) -> Optional[Any]:
_A : int = self.dummy_cond_unet
_A : List[str] = PNDMScheduler(skip_prk_steps=_a )
_A : List[str] = self.dummy_vae
_A : Tuple = self.dummy_text_encoder
_A : str = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_A : str = 77
_A : List[Any] = self.dummy_image.to(_a )
# put models in fp16
_A : List[str] = unet.half()
_A : Union[str, Any] = vae.half()
_A : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
_A : int = AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_A : Optional[int] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
_A : Tuple = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_A : int = """A painting of a squirrel eating a burger"""
_A : Tuple = torch.manual_seed(0 )
_A : List[str] = alt_pipe(
[prompt] , generator=_a , num_inference_steps=2 , output_type="""np""" , image=_a , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def a__ ( self ) -> str:
_A : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
_A : Union[str, Any] = init_image.resize((760, 504) )
_A : Union[str, Any] = """BAAI/AltDiffusion"""
_A : Any = AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_A : int = """A fantasy landscape, trending on artstation"""
_A : Tuple = torch.manual_seed(0 )
_A : int = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type="""np""" , )
_A : int = output.images[0]
_A : List[str] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_A : Dict = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> Tuple:
_A : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_A : str = init_image.resize((768, 512) )
_A : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
_A : str = """BAAI/AltDiffusion"""
_A : str = AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_A : Dict = """A fantasy landscape, trending on artstation"""
_A : str = torch.manual_seed(0 )
_A : int = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type="""np""" , )
_A : int = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so use a loose max-abs-error tolerance here
assert np.abs(expected_image - image ).max() < 1e-2
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class lowercase ( UpperCamelCase__ ):
def __init__( self , *_a , **_a ) -> Union[str, Any]:
super().__init__(*_a , **_a )
requires_backends(self , """vision""" )
self.check_model_type(_a )
def __call__( self , _a , **_a ) -> Any:
return super().__call__(_a , **_a )
def a__ ( self , **_a ) -> str:
return {}, {}, {}
def a__ ( self , _a ) -> List[Any]:
_A : Any = load_image(_a )
_A : Dict = image.size
_A : Optional[int] = self.image_processor(images=_a , return_tensors=self.framework )
return model_inputs
def a__ ( self , _a ) -> int:
_A : Optional[int] = self.model(**_a )
return model_outputs
def a__ ( self , _a ) -> str:
_A : Any = model_outputs.predicted_depth
_A : Dict = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=_a )
_A : Tuple = prediction.squeeze().cpu().numpy()
_A : Optional[int] = (output * 255 / np.max(_a )).astype("""uint8""" )
_A : Optional[Any] = Image.fromarray(_a )
_A : List[str] = {}
_A : str = predicted_depth
_A : Optional[int] = depth
return output_dict
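# A minimal usage sketch (not part of the pipeline class above). The checkpoint
# name is illustrative; any depth-estimation checkpoint works. The output keys
# mirror the `output_dict` built in the postprocess step above.
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(result["predicted_depth"].shape)  # raw torch.Tensor of depths
    result["depth"].save("depth.png")  # PIL.Image rendered from the tensor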
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a=None , _a=None , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A : Optional[Any] = self.model.config
else:
_A : int = config
_A : Optional[Any] = data_args
_A : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : Union[str, Any] = label_smoothed_nll_loss
def a__ ( self , _a ) -> int:
if self.optimizer is None:
_A : List[str] = ["""bias""", """LayerNorm.weight"""]
_A : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
if self.args.adafactor:
_A : Dict = Adafactor
_A : int = {"""scale_parameter""": False, """relative_step""": False}
else:
_A : int = AdamW
_A : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_A : List[str] = self.args.learning_rate
if self.sharded_ddp:
_A : List[str] = OSS(
params=_a , optim=_a , **_a , )
else:
_A : Tuple = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_A : Union[str, Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a__ ( self , _a ) -> Dict:
_A : List[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
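    # Example: running with `--lr_scheduler cosine --warmup_steps 500` makes the
    # lookup above resolve to
    #   get_cosine_schedule_with_warmup(self.optimizer, num_warmup_steps=500,
    #                                   num_training_steps=num_training_steps)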
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
        # If the PAD token is not defined, at least the EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
_a = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = None , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ) -> None:
super().__init__(**_a )
_A : Optional[Any] = size if size is not None else {"""shortest_edge""": 256}
_A : List[str] = get_size_dict(_a , default_to_square=_a )
_A : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_A : int = get_size_dict(_a )
_A : Union[str, Any] = do_resize
_A : Union[str, Any] = size
_A : Union[str, Any] = resample
_A : Dict = do_center_crop
_A : str = crop_size
_A : Dict = do_rescale
_A : Tuple = rescale_factor
_A : Any = do_normalize
_A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
_A : List[str] = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_A : int = get_resize_output_image_size(_a , size=size["""shortest_edge"""] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def a__ ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
_A : Union[str, Any] = get_size_dict(_a )
return center_crop(_a , size=(size["""height"""], size["""width"""]) , data_format=_a , **_a )
def a__ ( self , _a , _a , _a = None , **_a ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def a__ ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def a__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> List[str]:
_A : Dict = do_resize if do_resize is not None else self.do_resize
_A : Any = size if size is not None else self.size
_A : List[str] = get_size_dict(_a , default_to_square=_a )
_A : Optional[int] = resample if resample is not None else self.resample
_A : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_A : str = crop_size if crop_size is not None else self.crop_size
_A : List[Any] = get_size_dict(_a )
_A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_A : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : Any = do_normalize if do_normalize is not None else self.do_normalize
_A : Optional[Any] = image_mean if image_mean is not None else self.image_mean
_A : List[Any] = image_std if image_std is not None else self.image_std
_A : Any = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_A : int = [to_numpy_array(_a ) for image in images]
if do_resize:
_A : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
_A : str = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
_A : Optional[int] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_A : Any = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_A : str = [to_channel_dimension_format(_a , _a ) for image in images]
_A : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
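# The preprocess pipeline above applies, in order: to_numpy -> resize ->
# center_crop -> rescale -> normalize -> channel-dimension conversion, with
# each step gated by its corresponding `do_*` flag.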
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(snake_case_,snake_case_ ) -> bool:
_A : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A : Optional[int] = mean(
int(is_in_circle(uniform(-1.0,1.0 ),uniform(-1.0,1.0 ) ) )
for _ in range(snake_case_ ) )
    # The ratio of the circle's area to the square's area is pi/4.
_A : List[str] = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
    print(f'''The value of pi from the math module is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
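# Why pi/4: the points above are drawn uniformly from the square [-1, 1] x [-1, 1]
# (area 4), and the inscribed unit circle has area pi, so the expected fraction
# of points landing inside the circle is pi / 4.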
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0.0,snake_case_ = 1.0,):
return mean(
function_to_integrate(uniform(snake_case_,snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = 1.0 ):
def identity_function(snake_case_ ) -> float:
return x
_A : Any = area_under_curve_estimator(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def lowerCAmelCase_ ( snake_case_ ):
def function_to_integrate(snake_case_ ) -> float:
return sqrt(4.0 - x * x )
_A : Optional[int] = area_under_curve_estimator(
snake_case_,snake_case_,0.0,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
class lowercase :
def __init__( self ) -> None:
_A : dict[str, TrieNode] = {} # Mapping from char to TrieNode
_A : Optional[Any] = False
def a__ ( self , _a ) -> None:
for word in words:
self.insert(_a )
def a__ ( self , _a ) -> None:
_A : Optional[Any] = self
for char in word:
if char not in curr.nodes:
_A : Optional[Any] = TrieNode()
_A : List[Any] = curr.nodes[char]
_A : Optional[int] = True
def a__ ( self , _a ) -> bool:
_A : Union[str, Any] = self
for char in word:
if char not in curr.nodes:
return False
_A : str = curr.nodes[char]
return curr.is_leaf
def a__ ( self , _a ) -> None:
def _delete(_a , _a , _a ) -> bool:
if index == len(_a ):
# If word does not exist
if not curr.is_leaf:
return False
_A : Any = False
return len(curr.nodes ) == 0
_A : List[Any] = word[index]
_A : Optional[Any] = curr.nodes.get(_a )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_A : List[str] = _delete(_a , _a , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _a , 0 )
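# Complexity note: insert, find, and delete each walk one trie node per character,
# so every operation above runs in O(L) time for a word of length L.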
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if node.is_leaf:
print(snake_case_,end=""" """ )
for key, value in node.nodes.items():
print_words(snake_case_,word + key )
def lowerCAmelCase_ ( ):
_A : int = """banana bananas bandana band apple all beast""".split()
_A : Optional[int] = TrieNode()
root.insert_many(snake_case_ )
# print_words(root, "")
assert all(root.find(snake_case_ ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
print(str(snake_case_ ),"""works!""" if passes else """doesn't work :(""" )
def lowerCAmelCase_ ( ):
assert test_trie()
def lowerCAmelCase_ ( ):
print_results("""Testing trie functionality""",test_trie() )
if __name__ == "__main__":
main()
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
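# `_LazyModule` defers the imports declared in `_import_structure` until an
# attribute is first accessed, keeping `import transformers` fast even when
# torch is installed.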
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
                self.assertEqual(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
        # Check whether the number of models matches the count in README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
@register_to_config
def __init__( self , _a = 128 , _a = 256 , _a = 2000.0 , _a = 768 , _a = 12 , _a = 12 , _a = 64 , _a = 2048 , _a = 0.1 , ) -> Tuple:
super().__init__()
_A : Optional[int] = nn.Sequential(
nn.Linear(_a , d_model * 4 , bias=_a ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_a ) , nn.SiLU() , )
_A : Tuple = nn.Embedding(_a , _a )
_A : str = False
_A : Optional[int] = nn.Linear(_a , _a , bias=_a )
_A : Optional[Any] = nn.Dropout(p=_a )
_A : Dict = nn.ModuleList()
for lyr_num in range(_a ):
# FiLM conditional T5 decoder
_A : int = DecoderLayer(d_model=_a , d_kv=_a , num_heads=_a , d_ff=_a , dropout_rate=_a )
self.decoders.append(_a )
_A : List[str] = TaLayerNorm(_a )
_A : Tuple = nn.Dropout(p=_a )
_A : str = nn.Linear(_a , _a , bias=_a )
def a__ ( self , _a , _a ) -> Tuple:
_A : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
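        # Shape note: a query mask (B, T_q) and a key mask (B, T_k) combine via the
        # outer product above into a (B, 1, T_q, T_k) mask that broadcasts over
        # attention heads.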
def a__ ( self , _a , _a , _a ) -> Optional[Any]:
_A , _A , _A : Any = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_A : int = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_A : int = self.conditioning_emb(_a ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_A : List[str] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_A : int = torch.broadcast_to(
torch.arange(_a , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_A : List[Any] = self.position_encoding(_a )
_A : Optional[int] = self.continuous_inputs_projection(_a )
inputs += position_encodings
_A : Dict = self.dropout(_a )
# decoder: No padding present.
_A : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_A : List[str] = [(x, self.encoder_decoder_mask(_a , _a )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_A : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_A : List[Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_A : Tuple = lyr(
_a , conditioning_emb=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )[0]
_A : str = self.decoder_norm(_a )
_A : Dict = self.post_dropout(_a )
_A : List[str] = self.spec_out(_a )
return spec_out
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a , _a , _a=1e-6 ) -> List[Any]:
super().__init__()
_A : Tuple = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_a , d_kv=_a , num_heads=_a , dropout_rate=_a ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_a , d_kv=_a , num_heads=_a , dropout_rate=_a , layer_norm_epsilon=_a , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_a , d_ff=_a , dropout_rate=_a , layer_norm_epsilon=_a ) )
def a__ ( self , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ) -> Union[str, Any]:
_A : Any = self.layer[0](
_a , conditioning_emb=_a , attention_mask=_a , )
if encoder_hidden_states is not None:
_A : Any = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
_A : Any = self.layer[1](
_a , key_value_states=_a , attention_mask=_a , )
# Apply Film Conditional Feed Forward layer
_A : Optional[int] = self.layer[-1](_a , _a )
return (hidden_states,)
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a ) -> Tuple:
super().__init__()
_A : Any = TaLayerNorm(_a )
_A : Optional[int] = TaFiLMLayer(in_features=d_model * 4 , out_features=_a )
_A : Union[str, Any] = Attention(query_dim=_a , heads=_a , dim_head=_a , out_bias=_a , scale_qk=_a )
_A : Any = nn.Dropout(_a )
def a__ ( self , _a , _a=None , _a=None , ) -> Tuple:
# pre_self_attention_layer_norm
_A : List[str] = self.layer_norm(_a )
if conditioning_emb is not None:
_A : int = self.FiLMLayer(_a , _a )
# Self-attention block
_A : List[str] = self.attention(_a )
_A : Optional[Any] = hidden_states + self.dropout(_a )
return hidden_states
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a , _a ) -> List[str]:
super().__init__()
_A : Union[str, Any] = Attention(query_dim=_a , heads=_a , dim_head=_a , out_bias=_a , scale_qk=_a )
_A : Any = TaLayerNorm(_a , eps=_a )
_A : List[Any] = nn.Dropout(_a )
def a__ ( self , _a , _a=None , _a=None , ) -> Tuple:
_A : Optional[Any] = self.layer_norm(_a )
_A : str = self.attention(
_a , encoder_hidden_states=_a , attention_mask=attention_mask.squeeze(1 ) , )
_A : Any = hidden_states + self.dropout(_a )
return layer_output
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a ) -> Dict:
super().__init__()
_A : List[Any] = TaDenseGatedActDense(d_model=_a , d_ff=_a , dropout_rate=_a )
_A : str = TaFiLMLayer(in_features=d_model * 4 , out_features=_a )
_A : List[Any] = TaLayerNorm(_a , eps=_a )
_A : Optional[Any] = nn.Dropout(_a )
def a__ ( self , _a , _a=None ) -> int:
_A : Optional[int] = self.layer_norm(_a )
if conditioning_emb is not None:
_A : str = self.film(_a , _a )
_A : Tuple = self.DenseReluDense(_a )
_A : Tuple = hidden_states + self.dropout(_a )
return hidden_states
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a ) -> str:
super().__init__()
_A : str = nn.Linear(_a , _a , bias=_a )
_A : Optional[int] = nn.Linear(_a , _a , bias=_a )
_A : int = nn.Linear(_a , _a , bias=_a )
_A : Dict = nn.Dropout(_a )
_A : Optional[Any] = NewGELUActivation()
def a__ ( self , _a ) -> Union[str, Any]:
_A : Union[str, Any] = self.act(self.wi_a(_a ) )
_A : Tuple = self.wi_a(_a )
_A : Optional[Any] = hidden_gelu * hidden_linear
_A : Tuple = self.dropout(_a )
_A : str = self.wo(_a )
return hidden_states
class lowercase ( nn.Module ):
def __init__( self , _a , _a=1e-6 ) -> Union[str, Any]:
super().__init__()
_A : Dict = nn.Parameter(torch.ones(_a ) )
_A : List[Any] = eps
def a__ ( self , _a ) -> Tuple:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
        _A : int = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=_a )
_A : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
_A : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
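# In symbols, the layer above computes y = w * x / sqrt(mean(x^2) + eps),
# i.e. RMSNorm: scaling only, with no mean subtraction and no bias.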
class lowercase ( nn.Module ):
def a__ ( self , _a ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(_a , 3.0 )) ))
class lowercase ( nn.Module ):
def __init__( self , _a , _a ) -> int:
super().__init__()
_A : Any = nn.Linear(_a , out_features * 2 , bias=_a )
def a__ ( self , _a , _a ) -> int:
_A : Dict = self.scale_bias(_a )
_A , _A : Union[str, Any] = torch.chunk(_a , 2 , -1 )
_A : Dict = x * (1 + scale) + shift
return x
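# FiLM (feature-wise linear modulation): the conditioning embedding is projected
# to per-feature (scale, shift) pairs and applied as x * (1 + scale) + shift,
# so a zero conditioning signal leaves the activations unchanged.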
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
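# e.g. floats_list((2, 3)) returns a 2-element list of 3 random floats each,
# scaled by `scale` and drawn from `rng` (the module-level RNG by default).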
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
_snake_case = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
_snake_case = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "attention_mask"]
_a = BartTokenizer
def __init__( self , _a=None , _a=None , _a=None , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , _a=True , **_a , ) -> Tuple:
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_A : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _a ) != add_prefix_space:
_A : Tuple = getattr(_a , pre_tok_state.pop("""type""" ) )
_A : Union[str, Any] = add_prefix_space
_A : List[str] = pre_tok_class(**_a )
_A : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_A : int = """post_processor"""
_A : Optional[Any] = getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_A : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_A : str = tuple(state["""sep"""] )
if "cls" in state:
_A : List[str] = tuple(state["""cls"""] )
_A : str = False
if state.get("""add_prefix_space""" , _a ) != add_prefix_space:
_A : Any = add_prefix_space
_A : Dict = True
if state.get("""trim_offsets""" , _a ) != trim_offsets:
_A : List[Any] = trim_offsets
_A : Union[str, Any] = True
if changes_to_apply:
_A : List[str] = getattr(_a , state.pop("""type""" ) )
_A : int = component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
def a__ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def a__ ( self , _a ) -> Tuple:
_A : Union[str, Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_A : Any = value
def a__ ( self , *_a , **_a ) -> BatchEncoding:
_A : Optional[int] = kwargs.get("""is_split_into_words""" , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a , **_a )
def a__ ( self , *_a , **_a ) -> BatchEncoding:
_A : str = kwargs.get("""is_split_into_words""" , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a , **_a )
def a__ ( self , _a , _a = None ) -> Tuple[str]:
_A : Union[str, Any] = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def a__ ( self , _a , _a=None ) -> Tuple:
_A : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a__ ( self , _a , _a = None ) -> List[int]:
_A : List[Any] = [self.sep_token_id]
_A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
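# Illustrative sketch (added; `tok` is a hypothetical loaded instance of the fast
# tokenizer above): BART formats a single sequence as `<s> A </s>` and a pair as
# `<s> A </s> </s> B </s>`, which is what build_inputs_with_special_tokens produces:
#
#   ids_a = tok("Hello", add_special_tokens=False).input_ids
#   ids_b = tok("world", add_special_tokens=False).input_ids
#   pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)
#   # pair == [tok.bos_token_id] + ids_a + [tok.eos_token_id] * 2 + ids_b + [tok.eos_token_id]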
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
from typing import Any
def lowerCAmelCase_ ( snake_case_ ):
if not postfix_notation:
return 0
_A : Any = {"""+""", """-""", """*""", """/"""}
_A : list[Any] = []
for token in postfix_notation:
if token in operations:
_A , _A : Optional[Any] = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(snake_case_ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
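    # Added usage sketch, showing the intended behavior of the stack-based
    # evaluator above (division truncates toward zero, unlike Python's floor //):
    #
    #   lowerCAmelCase_(["2", "1", "+", "3", "*"])  # == 9, i.e. (2 + 1) * 3
    #   lowerCAmelCase_(["-13", "5", "/"])          # == -2, truncated toward zero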
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _A : Optional[Any] = Image.fromarray(np.uint8(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
import heapq
def lowerCAmelCase_ ( snake_case_ ):
_A : list[list] = []
    # for each node and its adjacency list, add the node and its rank to the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to get max-heap behavior
for key, value in graph.items():
# O(log(n))
heapq.heappush(snake_case_,[-1 * len(snake_case_ ), (key, value)] )
# chosen_vertices = set of chosen vertices
_A : Optional[int] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_A : str = heapq.heappop(snake_case_ )[1][0]
chosen_vertices.add(snake_case_ )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if v has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
_A : List[Any] = elem[1][1].index(snake_case_ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(snake_case_ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also 4d > a
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[int] = torch.load(snake_case_,map_location="""cpu""" )
if "model" in sd.keys():
_A : Dict = torch.load(snake_case_,map_location="""cpu""" )["""model"""]
# pop unnecessary weights
_A : int = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
_A : str = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_A : Dict = sd.pop(snake_case_ )
_A : List[str] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_A : Tuple = sd[key]
            # split the fused QKV projection into separate Q, K and V weights
_A : Optional[Any] = key.replace(""".qkv_proj.""",""".q_proj.""" )
_A : Tuple = key.replace(""".qkv_proj.""",""".k_proj.""" )
_A : List[str] = key.replace(""".qkv_proj.""",""".v_proj.""" )
_A : Dict = value.shape[0]
assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` keeps the fused QKV weight in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_A , _A , _A : List[str] = torch.split(snake_case_,depth // 3,dim=0 )
_A : str = q
_A : int = k
_A : List[str] = v
del sd[key]
return sd
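# Illustrative sketch (added, hypothetical shapes): the split above is a plain
# torch.split along dim 0 of the fused projection, in the K, V, Q order noted above.
#
#   fused = torch.randn(12, 4)                   # (3 * hidden, hidden) with hidden = 4
#   k, v, q = torch.split(fused, 12 // 3, dim=0)
#   assert q.shape == k.shape == v.shape == (4, 4)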
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=None ):
_A : Any = load_checkpoint(snake_case_ )
if config is not None:
_A : Union[str, Any] = OPTConfig.from_pretrained(snake_case_ )
else:
_A : str = OPTConfig()
_A : Optional[Any] = OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
_snake_case = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = test_results.split(""" """ )
_A : Optional[int] = 0
_A : Tuple = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_A : int = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(snake_case_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
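# Example (added sketch): for a pytest summary such as
#   "10 failed, 100 passed in 1:00:00 =="
# the loop above counts failed = 10 and success = 100, and time_spent falls back
# to expressions[-2] ("1:00:00") because the trailing token contains "=".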
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = {}
_A : Optional[Any] = None
_A : Union[str, Any] = False
for line in failures_short_lines.split("""\n""" ):
if re.search(r"""_ \[doctest\]""",snake_case_ ):
_A : int = True
_A : Optional[int] = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_A : int = line
_A : Dict = False
return failures
class lowercase :
def __init__( self , _a , _a ) -> str:
_A : Dict = title
_A : Dict = doc_test_results["""time_spent"""].split(""",""" )[0]
_A : Optional[Any] = doc_test_results["""success"""]
_A : List[Any] = doc_test_results["""failures"""]
_A : Any = self.n_success + self.n_failures
# Failures and success of the modeling tests
_A : List[str] = doc_test_results
@property
def a__ ( self ) -> str:
_A : str = [self._time_spent]
_A : List[Any] = 0
for time in time_spent:
_A : Optional[int] = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_a ) == 1:
_A : List[str] = [0, 0, time_parts[0]]
_A , _A , _A : Any = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
_A , _A , _A : Optional[Any] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'''{int(_a )}h{int(_a )}m{int(_a )}s'''
@property
def a__ ( self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def a__ ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def a__ ( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def a__ ( self ) -> Dict:
_A : Tuple = 40
_A : Optional[Any] = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(_a , _a )}
_A : Tuple = """"""
for category, failures in category_failures.items():
if len(_a ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def a__ ( self ) -> str:
_A : Tuple = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_a )
@staticmethod
def a__ ( ) -> List[str]:
_A : Tuple = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(_a )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=_a , )
def a__ ( self ) -> Dict:
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_A : Tuple = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
_A : List[str] = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=_a , )
def a__ ( self , _a , _a , _a , _a ) -> Tuple:
_A : Dict = """"""
for key, value in failures.items():
_A : List[str] = value[:200] + """ [Truncated]""" if len(_a ) > 250 else value
failures_text += F'''*{key}*\n_{value}_\n\n'''
_A : Any = job_name
_A : str = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_A : List[str] = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def a__ ( self ) -> str:
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_A : int = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
        _A : Tuple = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_A : Tuple = F'''*Num failures* :{len(job_result["failed"] )} \n'''
_A : Any = job_result["""failures"""]
_A : Tuple = self.get_reply_blocks(_a , _a , _a , text=_a )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F'''Results for {job}''' , blocks=_a , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def lowerCAmelCase_ ( ):
_A : Optional[int] = os.environ["""GITHUB_RUN_ID"""]
_A : Tuple = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
_A : Tuple = requests.get(snake_case_ ).json()
_A : str = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : List[str] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : Optional[Any] = requests.get(url + f'''&page={i + 2}''' ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""",snake_case_ )
return {}
def lowerCAmelCase_ ( snake_case_ ):
_A : Union[str, Any] = {}
if os.path.exists(snake_case_ ):
_A : Optional[Any] = os.listdir(snake_case_ )
for file in files:
try:
with open(os.path.join(snake_case_,snake_case_ ),encoding="""utf-8""" ) as f:
_A : int = f.read()
except UnicodeDecodeError as e:
raise ValueError(f'''Could not open {os.path.join(snake_case_,snake_case_ )}.''' ) from e
return _artifact
def lowerCAmelCase_ ( ):
class lowercase :
def __init__( self , _a ) -> Optional[Any]:
_A : str = name
_A : Any = []
def __str__( self ) -> Optional[Any]:
return self.name
def a__ ( self , _a ) -> Tuple:
self.paths.append({"""name""": self.name, """path""": path} )
_A : Dict[str, Artifact] = {}
_A : Optional[int] = filter(os.path.isdir,os.listdir() )
for directory in directories:
_A : Optional[int] = directory
if artifact_name not in _available_artifacts:
_A : Optional[int] = Artifact(snake_case_ )
_available_artifacts[artifact_name].add_path(snake_case_ )
return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
        # <mask_1> masks a whole sentence while <mask_2> masks a single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase_ ( snake_case_,snake_case_=0.9_99,snake_case_="cosine",):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case_ ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case_ ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_A : Tuple = []
for i in range(snake_case_ ):
_A : str = i / num_diffusion_timesteps
_A : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ),snake_case_ ) )
    return torch.tensor(snake_case_,dtype=torch.float32 )
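# Note (added): betas_for_alpha_bar discretizes a continuous alpha-bar curve via
#   beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i)
# clipped to max_beta (0.999 by default), so the cumulative product of (1 - beta_i)
# tracks the chosen alpha_bar_fn over the diffusion timesteps.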
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in KarrasDiffusionSchedulers]
_a = 2
@register_to_config
def __init__( self , _a = 1000 , _a = 0.00085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = False , _a = False , _a = 1.0 , _a = "linspace" , _a = 0 , ) -> int:
if trained_betas is not None:
            _A : Optional[int] = torch.tensor(_a , dtype=torch.float32 )
elif beta_schedule == "linear":
            _A : List[str] = torch.linspace(_a , _a , _a , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_A : List[str] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_A : int = betas_for_alpha_bar(_a , alpha_transform_type="""cosine""" )
elif beta_schedule == "exp":
_A : List[str] = betas_for_alpha_bar(_a , alpha_transform_type="""exp""" )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_A : Tuple = 1.0 - self.betas
_A : Dict = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
_A : Any = use_karras_sigmas
def a__ ( self , _a , _a=None ) -> str:
if schedule_timesteps is None:
_A : Optional[Any] = self.timesteps
_A : str = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_A : List[Any] = 1 if len(_a ) > 1 else 0
else:
_A : Any = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
_A : Any = self._index_counter[timestep_int]
return indices[pos].item()
@property
def a__ ( self ) -> Any:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def a__ ( self , _a , _a , ) -> torch.FloatTensor:
_A : Dict = self.index_for_timestep(_a )
_A : Optional[int] = self.sigmas[step_index]
_A : Any = sample / ((sigma**2 + 1) ** 0.5)
return sample
def a__ ( self , _a , _a = None , _a = None , ) -> Union[str, Any]:
_A : str = num_inference_steps
_A : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_A : Dict = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_A : Any = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_A : Any = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_A : Dict = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_A : Tuple = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_A : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_A : Optional[int] = np.log(_a )
_A : List[str] = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
if self.config.use_karras_sigmas:
_A : str = self._convert_to_karras(in_sigmas=_a , num_inference_steps=self.num_inference_steps )
_A : str = np.array([self._sigma_to_t(_a , _a ) for sigma in sigmas] )
        _A : Union[str, Any] = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
_A : str = torch.from_numpy(_a ).to(device=_a )
_A : int = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_A : Dict = torch.from_numpy(_a )
_A : str = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_a ).startswith("""mps""" ):
# mps does not support float64
            _A : Tuple = timesteps.to(_a , dtype=torch.float32 )
else:
_A : Optional[int] = timesteps.to(device=_a )
# empty dt and derivative
_A : Union[str, Any] = None
_A : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_A : Dict = defaultdict(_a )
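    # Example (added sketch): with num_train_timesteps=1000 and num_inference_steps=4,
    # the three spacing modes above produce (before any karras conversion):
    #   "linspace": [999, 666, 333, 0]
    #   "leading":  [750, 500, 250, 0]  (+ steps_offset)
    #   "trailing": [999, 749, 499, 249]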
def a__ ( self , _a , _a ) -> List[Any]:
# get log sigma
_A : str = np.log(_a )
# get distribution
_A : List[str] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_A : int = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_A : Any = low_idx + 1
_A : Dict = log_sigmas[low_idx]
_A : List[str] = log_sigmas[high_idx]
# interpolate sigmas
_A : Tuple = (low - log_sigma) / (low - high)
_A : Optional[int] = np.clip(_a , 0 , 1 )
# transform interpolation to time range
_A : List[Any] = (1 - w) * low_idx + w * high_idx
_A : str = t.reshape(sigma.shape )
return t
def a__ ( self , _a , _a ) -> torch.FloatTensor:
_A : float = in_sigmas[-1].item()
_A : float = in_sigmas[0].item()
_A : List[str] = 7.0 # 7.0 is the value used in the paper
_A : Optional[int] = np.linspace(0 , 1 , _a )
_A : Optional[Any] = sigma_min ** (1 / rho)
_A : Optional[Any] = sigma_max ** (1 / rho)
_A : Union[str, Any] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
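    # Note (added): this implements the Karras et al. (2022) rho-schedule,
    #   sigma_i = (sigma_max**(1/rho) + i/(n-1) * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho
    # with rho = 7. E.g. for sigma_min=0.1, sigma_max=10, n=3 the middle sigma is
    # ((10**(1/7) + 0.1**(1/7)) / 2)**7 ≈ 1.45, concentrating steps toward small sigmas.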
@property
def a__ ( self ) -> Dict:
return self.dt is None
def a__ ( self , _a , _a , _a , _a = True , ) -> Union[SchedulerOutput, Tuple]:
_A : str = self.index_for_timestep(_a )
# advance index counter by 1
_A : Optional[int] = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_A : Optional[Any] = self.sigmas[step_index]
_A : Any = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_A : Optional[int] = self.sigmas[step_index - 1]
_A : List[str] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_A : Union[str, Any] = 0
_A : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_A : List[str] = sigma_hat if self.state_in_first_order else sigma_next
_A : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_A : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next
_A : Tuple = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_A : Optional[Any] = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
_A : Tuple = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_A : Any = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_A : Tuple = sigma_next - sigma_hat
# store for 2nd order step
_A : List[Any] = derivative
_A : Dict = dt
_A : Dict = sample
else:
# 2. 2nd order / Heun's method
_A : Dict = (sample - pred_original_sample) / sigma_next
_A : Dict = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_A : Any = self.dt
_A : List[str] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_A : List[str] = None
_A : List[Any] = None
_A : List[Any] = None
_A : List[Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
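    # Note (added): the two branches above realize Heun's method: a first-order
    # Euler step x1 = x + dt * d(x, sigma), then a corrector that re-evaluates the
    # derivative at x1 and averages, x_next = x + dt * (d + d1) / 2, with
    # prev_derivative / dt / sample cached on self between the two calls.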
def a__ ( self , _a , _a , _a , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_A : List[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
            _A : List[Any] = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            _A : Tuple = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
_A : str = self.timesteps.to(original_samples.device )
_A : List[str] = timesteps.to(original_samples.device )
_A : Union[str, Any] = [self.index_for_timestep(_a , _a ) for t in timesteps]
_A : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_A : Optional[int] = sigma.unsqueeze(-1 )
_A : Dict = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> List[str]:
return self.config.num_train_timesteps
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def lowerCAmelCase_ ( snake_case_ ):
_A : str = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_A : Dict = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_A : Optional[Any] = 4
_A : str = 48
_A : List[str] = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_A : Dict = [6, 6, 6, 6]
_A : Union[str, Any] = 60
_A : int = [6, 6, 6, 6]
_A : Dict = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_A : str = 4
_A : Optional[Any] = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_A : int = 1
_A : Optional[int] = 1
_A : str = 126
_A : Optional[Any] = 7
_A : Tuple = 2_55.0
_A : Any = """"""
return config
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if "patch_embed.proj" in name and "layers" not in name:
_A : Optional[int] = name.replace("""patch_embed.proj""","""embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
_A : List[Any] = name.replace("""patch_embed.norm""","""embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
_A : int = name.replace("""layers""","""encoder.stages""" )
if "residual_group.blocks" in name:
_A : Dict = name.replace("""residual_group.blocks""","""layers""" )
if "attn.proj" in name:
_A : int = name.replace("""attn.proj""","""attention.output.dense""" )
if "attn" in name:
_A : List[str] = name.replace("""attn""","""attention.self""" )
if "norm1" in name:
_A : int = name.replace("""norm1""","""layernorm_before""" )
if "norm2" in name:
_A : Tuple = name.replace("""norm2""","""layernorm_after""" )
if "mlp.fc1" in name:
_A : Tuple = name.replace("""mlp.fc1""","""intermediate.dense""" )
if "mlp.fc2" in name:
_A : str = name.replace("""mlp.fc2""","""output.dense""" )
if "q_bias" in name:
_A : Union[str, Any] = name.replace("""q_bias""","""query.bias""" )
if "k_bias" in name:
_A : List[str] = name.replace("""k_bias""","""key.bias""" )
if "v_bias" in name:
_A : str = name.replace("""v_bias""","""value.bias""" )
if "cpb_mlp" in name:
_A : Dict = name.replace("""cpb_mlp""","""continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
_A : Dict = name.replace("""patch_embed.proj""","""patch_embed.projection""" )
if name == "norm.weight":
_A : int = """layernorm.weight"""
if name == "norm.bias":
_A : List[Any] = """layernorm.bias"""
if "conv_first" in name:
_A : List[str] = name.replace("""conv_first""","""first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_A : Tuple = name.replace("""conv_last""","""final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_A : Optional[int] = name.replace("""conv_before_upsample.0""","""conv_before_upsample""" )
if "upsample.0" in name:
_A : Tuple = name.replace("""upsample.0""","""upsample.convolution_0""" )
if "upsample.2" in name:
_A : List[Any] = name.replace("""upsample.2""","""upsample.convolution_1""" )
_A : List[str] = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
_A : Dict = name.replace("""upsample.0.weight""","""upsample.conv.weight""" )
_A : str = name.replace("""upsample.0.bias""","""upsample.conv.bias""" )
else:
pass
else:
_A : Any = """swin2sr.""" + name
return name
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
for key in orig_state_dict.copy().keys():
_A : str = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
_A : Dict = key.split(""".""" )
_A : Any = int(key_split[1] )
_A : Any = int(key_split[4] )
_A : Optional[Any] = config.embed_dim
if "weight" in key:
_A : Tuple = val[:dim, :]
_A : Any = val[dim : dim * 2, :]
_A : List[str] = val[-dim:, :]
else:
_A : Any = val[:dim]
_A : Tuple = val[dim : dim * 2]
_A : Optional[int] = val[-dim:]
pass
else:
_A : Dict = val
return orig_state_dict
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = get_config(snake_case_ )
_A : Optional[int] = SwinaSRForImageSuperResolution(snake_case_ )
model.eval()
_A : Optional[int] = torch.hub.load_state_dict_from_url(snake_case_,map_location="""cpu""" )
_A : str = convert_state_dict(snake_case_,snake_case_ )
_A , _A : Union[str, Any] = model.load_state_dict(snake_case_,strict=snake_case_ )
if len(snake_case_ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(snake_case_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
_A : Union[str, Any] = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
_A : str = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw ).convert("""RGB""" )
_A : List[str] = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_A : str = 126 if """Jpeg""" in checkpoint_url else 256
_A : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06],std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_A : List[str] = transforms(snake_case_ ).unsqueeze(0 )
if config.num_channels == 1:
_A : Optional[Any] = pixel_values[:, 0, :, :].unsqueeze(1 )
_A : int = model(snake_case_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_A : int = torch.Size([1, 3, 512, 512] )
_A : Union[str, Any] = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_A : Any = torch.Size([1, 3, 1024, 1024] )
_A : str = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_A : int = torch.Size([1, 3, 1024, 1024] )
_A : List[Any] = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_A : str = torch.Size([1, 3, 512, 512] )
_A : List[Any] = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_A : Union[str, Any] = torch.Size([1, 3, 1024, 1024] )
_A : Optional[int] = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3],snake_case_,atol=1e-3 )
print("""Looks ok!""" )
_A : Union[str, Any] = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
_A : Any = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case_ )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
_snake_case = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 54
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
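# Added sketch: the missing-column behaviour exercised above, as a standalone
# call — the first record fixes the schema, and keys absent from later records
# come back as None.
def _demo_from_list_missing_columns():
    dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    return dset[0], dset[1]  # ({'col_1': 1}, {'col_1': None})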
| 54
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=None , ) -> Dict:
_A : List[Any] = size if size is not None else {"""shortest_edge""": 20}
_A : List[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_A : List[Any] = parent
_A : Optional[int] = batch_size
_A : Union[str, Any] = num_channels
_A : Tuple = image_size
_A : Optional[Any] = min_resolution
_A : int = max_resolution
_A : List[str] = do_resize
_A : Optional[Any] = size
_A : List[str] = do_center_crop
_A : Tuple = crop_size
def a__ ( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = MobileNetVaImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Dict = MobileNetVaImageProcessingTester(self )
@property
def a__ ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> str:
_A : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """do_center_crop""" ) )
self.assertTrue(hasattr(_a , """crop_size""" ) )
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def a__ ( self ) -> List[str]:
pass
def a__ ( self ) -> str:
# Initialize image_processing
_A : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : Union[str, Any] = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def a__ ( self ) -> int:
# Initialize image_processing
_A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_A : Union[str, Any] = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
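# Added sketch (hypothetical parameter values): the resize -> center-crop
# contract the tests above assert, run directly on a single random image. Any
# input resolution ends up as (batch, channels, crop_height, crop_width).
def _demo_processor_output_shape():
    processor = MobileNetVaImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
    image = np.random.randint(0, 256, (32, 48, 3), dtype=np.uint8)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 3, 18, 18)
    return pixel_values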
| 54
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(stringa: str, stringb: str) -> str | Literal[False]:
    lista = list(stringa)
    listb = list(stringb)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(lista)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        checka = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the pair merges into k, so neither term is prime on its own
                    checka[i] = "*"
                    checka[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if checka[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(stringa: str, stringb: str, count: int) -> bool:
    lista = list(stringa)
    listb = list(stringb)
    count_n = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # mark prime implicants that are the only cover for some minterm column
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # take the essential implicants and zero out the columns they cover
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
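# Added walk-through (values chosen for illustration): minterms {1, 4, 6} of a
# three-variable function reduce to the prime implicants '001' and '1_0', and
# the covering step keeps both as essential.
def _demo_quine_mccluskey():
    binary = decimal_to_binary(3, [1, 4, 6])  # ['001', '100', '110']
    prime_implicants = check(binary)  # ['001', '1_0']
    chart = prime_implicant_chart(prime_implicants, binary)
    return selection(chart, prime_implicants)  # ['001', '1_0']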
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 54
| 1
|
def mean_absolute_deviation(nums):
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
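# Added example (not in the original file): the mean of [1, 2, 3, 4] is 2.5 and
# the absolute deviations are 1.5, 0.5, 0.5, 1.5, so the MAD is 1.0.
def _demo_mean_absolute_deviation():
    return mean_absolute_deviation([1, 2, 3, 4])  # 1.0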
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
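# Added trace (illustrative): for target "ab" and word_bank ["a", "b", "ab"],
# the table grows as [[[]], [["a"]], [["ab"], ["b", "a"]]]; reversing each
# combination then yields [["ab"], ["a", "b"]].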
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 54
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
_a = 42
_a = None
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = 2
@register_to_config
def __init__( self , _a = 0.02 , _a = 100 , _a = 1.007 , _a = 80 , _a = 0.05 , _a = 50 , ) -> str:
# standard deviation of the initial noise distribution
_A : Tuple = sigma_max
# setable values
_A : int = None
_A : np.IntTensor = None
_A : torch.FloatTensor = None # sigma(t_i)
def a__ ( self , _a , _a = None ) -> torch.FloatTensor:
return sample
def a__ ( self , _a , _a = None ) -> int:
_A : List[Any] = num_inference_steps
_A : int = np.arange(0 , self.num_inference_steps )[::-1].copy()
_A : Tuple = torch.from_numpy(_a ).to(_a )
_A : Tuple = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_A : List[str] = torch.tensor(_a , dtype=torch.floataa , device=_a )
def a__ ( self , _a , _a , _a = None ) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
_A : Tuple = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_A : str = 0
# sample eps ~ N(0, S_noise^2 * I)
_A : Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=_a ).to(sample.device )
_A : Union[str, Any] = sigma + gamma * sigma
_A : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def a__ ( self , _a , _a , _a , _a , _a = True , ) -> Union[KarrasVeOutput, Tuple]:
_A : Any = sample_hat + sigma_hat * model_output
_A : Optional[Any] = (sample_hat - pred_original_sample) / sigma_hat
_A : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_a , derivative=_a , pred_original_sample=_a )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a = True , ) -> Union[KarrasVeOutput, Tuple]:
_A : Union[str, Any] = sample_prev + sigma_prev * model_output
_A : List[str] = (sample_prev - pred_original_sample) / sigma_prev
_A : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_a , derivative=_a , pred_original_sample=_a )
def a__ ( self , _a , _a , _a ) -> Dict:
raise NotImplementedError()
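# Added sketch (not part of this module): the predictor-corrector loop this
# scheduler is built for, written against the public diffusers names
# (timesteps, schedule, add_noise_to_input, step, step_correct) that the
# anonymised defs above correspond to. `denoiser(x, sigma)` stands in for any
# sigma-conditioned model, and scheduler.set_timesteps(...) is assumed to have
# been called already.
def _demo_karras_ve_sampling(scheduler, denoiser, sample, generator=None):
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        # stochastic churn: lift the noise level from sigma to sigma_hat
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
        # Euler (predictor) step from sigma_hat down to sigma_prev
        output = scheduler.step(denoiser(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
        if sigma_prev != 0:
            # Heun-style corrector re-evaluates the model at the predicted sample
            output = scheduler.step_correct(
                denoiser(output.prev_sample, sigma_prev),
                sigma_hat,
                sigma_prev,
                sample_hat,
                output.prev_sample,
                output.derivative,
            )
        sample = output.prev_sample
    return sample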
| 54
|
import operator
def strand_sort(arr, reverse=False, solution=None):
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
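# Note (added): strand_sort consumes `arr` in place via pop(), so callers that
# need their original list afterwards should pass a copy, e.g.
# strand_sort(list(data)).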
| 54
| 1
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_snake_case = datasets.logging.get_logger(__name__)
_snake_case = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_snake_case = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_snake_case = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
_snake_case = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def a__ ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
def a__ ( self , _a ) -> Union[str, Any]:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
_A : List[Any] = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
_A : Optional[int] = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
_A : List[Any] = self.config_name.upper()
else:
raise KeyError(
F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
_A : str = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
_A : Any = score.BleurtScorer(os.path.join(_a , _a ) )
def a__ ( self , _a , _a ) -> Tuple:
_A : Union[str, Any] = self.scorer.score(references=_a , candidates=_a )
return {"scores": scores}
| 54
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
# If we use the most probably targets, and filter differently, we should still
# have the same results
_A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda x : x["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
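# Added sketch: the output-shape contract these tests pin down — one mask in
# the input yields a single ranked list of dicts, N masks yield a list of N
# such ranked lists. `fill_masker` and `tokenizer` are stand-ins for the
# fixtures used above.
def _demo_fill_mask_output_shapes(fill_masker, tokenizer):
    single = fill_masker(f"This is a {tokenizer.mask_token}")
    double = fill_masker(f"{tokenizer.mask_token} is {tokenizer.mask_token}")
    assert isinstance(single[0], dict)  # list[dict]
    assert isinstance(double[0], list)  # list[list[dict]]
    return single, double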
| 54
| 1
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(snake_case_,snake_case_ ) -> bool:
_A : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A : Optional[int] = mean(
int(is_in_circle(uniform(-1.0,1.0 ),uniform(-1.0,1.0 ) ) )
for _ in range(snake_case_ ) )
# The ratio of the area for circle to square is pi/4.
_A : List[str] = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0.0,snake_case_ = 1.0,):
return mean(
function_to_integrate(uniform(snake_case_,snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = 1.0 ):
def identity_function(snake_case_ ) -> float:
return x
_A : Any = area_under_curve_estimator(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def lowerCAmelCase_ ( snake_case_ ):
def function_to_integrate(snake_case_ ) -> float:
return sqrt(4.0 - x * x )
_A : Optional[int] = area_under_curve_estimator(
snake_case_,snake_case_,0.0,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 1
|
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self , _a , _a , _a = 0 ) -> None:
_A , _A : List[str] = row, column
_A : Union[str, Any] = [[default_value for c in range(_a )] for r in range(_a )]
def __str__( self ) -> str:
_A : Union[str, Any] = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
_A : Dict = 0
for row_vector in self.array:
for obj in row_vector:
_A : List[Any] = max(_a , len(str(_a ) ) )
_A : Optional[Any] = F'''%{max_element_length}s'''
# Make string and return
def single_line(_a ) -> str:
nonlocal string_format_identifier
_A : Optional[Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_a ) for row_vector in self.array )
return s
def __repr__( self ) -> str:
return str(self )
def a__ ( self , _a ) -> bool:
if not (isinstance(_a , (list, tuple) ) and len(_a ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , _a ) -> Any:
assert self.validate_indicies(_a )
return self.array[loc[0]][loc[1]]
def __setitem__( self , _a , _a ) -> None:
assert self.validate_indicies(_a )
_A : str = value
def __add__( self , _a ) -> Matrix:
assert isinstance(_a , _a )
assert self.row == another.row and self.column == another.column
# Add
_A : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_A : int = self[r, c] + another[r, c]
return result
def __neg__( self ) -> Matrix:
_A : List[str] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_A : Tuple = -self[r, c]
return result
def __sub__( self , _a ) -> Matrix:
return self + (-another)
def __mul__( self , _a ) -> Matrix:
if isinstance(_a , (int, float) ): # Scalar multiplication
_A : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
_A : Optional[int] = self[r, c] * another
return result
elif isinstance(_a , _a ): # Matrix multiplication
assert self.column == another.row
_A : Optional[int] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_A : Union[str, Any] = F'''Unsupported type given for another ({type(_a )})'''
raise TypeError(_a )
def a__ ( self ) -> Matrix:
_A : List[Any] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
_A : Optional[int] = self[r, c]
return result
def a__ ( self , _a , _a ) -> Any:
assert isinstance(_a , _a ) and isinstance(_a , _a )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_A : str = v.transpose()
_A : int = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
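    # Added note: the expression above is the Sherman-Morrison identity with
    # this matrix playing the role of A^(-1):
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # and the None return guards the singular case where the denominator
    # 1 + v^T A^(-1) u vanishes.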
# Testing
if __name__ == "__main__":
def lowerCAmelCase_ ( ):
# a^(-1)
_A : Tuple = Matrix(3,3,0 )
for i in range(3 ):
_A : List[Any] = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
_A : Tuple = Matrix(3,1,0 )
_A , _A , _A : List[Any] = 1, 2, -3
_A : Any = Matrix(3,1,0 )
_A , _A , _A : int = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(snake_case_,snake_case_ )}''' )
def lowerCAmelCase_ ( ):
import doctest
doctest.testmod()
testa()
| 54
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda i : i.created_at,reverse=True )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 54
| 1
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
_a = ["input_values", "padding_mask"]
def __init__( self , _a = 1 , _a = 2_4000 , _a = 0.0 , _a = None , _a = None , **_a , ) -> Optional[int]:
super().__init__(feature_size=_a , sampling_rate=_a , padding_value=_a , **_a )
_A : List[Any] = chunk_length_s
_A : Union[str, Any] = overlap
@property
def a__ ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__ ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
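    # Added worked example: with chunk_length_s = 1.0, sampling_rate = 24000 and
    # overlap = 0.01, chunk_length is 24000 samples and chunk_stride is
    # max(1, int((1.0 - 0.01) * 24000)) = 23760, so consecutive chunks overlap
    # by 240 samples.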
def __call__( self , _a , _a = None , _a = False , _a = None , _a = None , _a = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
_A : Dict = True
_A : Dict = bool(
isinstance(_a , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
_A : Optional[int] = [np.asarray(_a , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_a , np.ndarray ):
_A : List[str] = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
_A : Union[str, Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
_A : int = [np.asarray(_a ).T]
# verify inputs are valid
for idx, example in enumerate(_a ):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
_A : Any = None
_A : Optional[int] = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
_A : Optional[int] = min(array.shape[0] for array in raw_audio )
_A : str = int(np.floor(max_length / self.chunk_stride ) )
_A : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
_A : Tuple = max(array.shape[0] for array in raw_audio )
_A : Dict = int(np.ceil(max_length / self.chunk_stride ) )
_A : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
_A : Tuple = """max_length"""
else:
_A : Optional[int] = input_values
# normal padding on batch
if padded_inputs is None:
_A : Tuple = self.pad(
_a , max_length=_a , truncation=_a , padding=_a , return_attention_mask=_a , )
if padding:
_A : Dict = padded_inputs.pop("""attention_mask""" )
_A : Any = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
_A : str = example[..., None]
input_values.append(example.T )
_A : List[str] = input_values
if return_tensors is not None:
_A : List[str] = padded_inputs.convert_to_tensors(_a )
return padded_inputs
| 54
|
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_a , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(_a , _a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowercase ( UpperCamelCase__ ):
_a = "xmod"
def __init__( self , _a=3_0522 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , _a=False , _a=2 , _a=False , _a=True , _a=True , _a=("en_XX",) , _a=None , **_a , ) -> Optional[Any]:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : List[Any] = vocab_size
_A : Union[str, Any] = hidden_size
_A : Optional[Any] = num_hidden_layers
_A : List[str] = num_attention_heads
_A : str = hidden_act
_A : Any = intermediate_size
_A : Any = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : Any = max_position_embeddings
_A : int = type_vocab_size
_A : Tuple = initializer_range
_A : Dict = layer_norm_eps
_A : List[Any] = position_embedding_type
_A : Union[str, Any] = use_cache
_A : List[Any] = classifier_dropout
_A : Union[str, Any] = pre_norm
_A : Union[str, Any] = adapter_reduction_factor
_A : int = adapter_layer_norm
_A : List[Any] = adapter_reuse_layer_norm
_A : str = ln_before_adapter
_A : Optional[Any] = list(_a )
_A : Optional[int] = default_language
class lowercase ( UpperCamelCase__ ):
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_A : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
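# A hedged usage sketch: this configuration corresponds to `XmodConfig` in
# released transformers versions; the parameter values below are illustrative.
# from transformers import XmodConfig
# config = XmodConfig(vocab_size=30522, hidden_size=768, languages=("en_XX", "de_DE"))
# `adapter_reduction_factor` controls the bottleneck width of the
# per-language adapters added to each layer.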
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
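# A minimal numpy sketch (hypothetical helper, not the extractor's own method)
# of the zero-mean unit-variance normalization asserted above:
# x_norm = (x - mean(x)) / sqrt(var(x) + eps).
import numpy as np

def _zero_mean_unit_var_sketch(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    # subtract the mean and divide by the (stabilized) standard deviation,
    # giving approximately zero mean and unit variance
    return (x - x.mean()) / np.sqrt(x.var() + eps)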
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( nn.Module ):
def __init__( self , _a=3 , _a=3 , _a=("DownEncoderBlock2D",) , _a=(64,) , _a=2 , _a=32 , _a="silu" , _a=True , ) -> Any:
super().__init__()
_A : Optional[int] = layers_per_block
_A : Tuple = torch.nn.Convad(
_a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_A : str = None
_A : int = nn.ModuleList([] )
# down
_A : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(_a ):
_A : Optional[int] = output_channel
_A : List[Any] = block_out_channels[i]
_A : List[Any] = i == len(_a ) - 1
_A : Union[str, Any] = get_down_block(
_a , num_layers=self.layers_per_block , in_channels=_a , out_channels=_a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , )
self.down_blocks.append(_a )
# mid
_A : List[str] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# out
_A : Dict = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_a , eps=1e-6 )
_A : Optional[int] = nn.SiLU()
_A : List[str] = 2 * out_channels if double_z else out_channels
_A : List[str] = nn.Convad(block_out_channels[-1] , _a , 3 , padding=1 )
_A : int = False
def a__ ( self , _a ) -> Optional[Any]:
_A : Dict = x
_A : Tuple = self.conv_in(_a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_a ):
def custom_forward(*_a ):
return module(*_a )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
_A : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) , _a , use_reentrant=_a )
# middle
_A : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , use_reentrant=_a )
else:
for down_block in self.down_blocks:
_A : Optional[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a )
# middle
_A : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _a )
else:
# down
for down_block in self.down_blocks:
_A : Optional[int] = down_block(_a )
# middle
_A : int = self.mid_block(_a )
# post-process
_A : Tuple = self.conv_norm_out(_a )
_A : Optional[int] = self.conv_act(_a )
_A : Optional[int] = self.conv_out(_a )
return sample
class lowercase ( nn.Module ):
def __init__( self , _a=3 , _a=3 , _a=("UpDecoderBlock2D",) , _a=(64,) , _a=2 , _a=32 , _a="silu" , _a="group" , ) -> Optional[int]:
super().__init__()
_A : str = layers_per_block
_A : str = nn.Convad(
_a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_A : Optional[int] = None
_A : int = nn.ModuleList([] )
_A : Dict = in_channels if norm_type == """spatial""" else None
# mid
_A : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# up
_A : Union[str, Any] = list(reversed(_a ) )
_A : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_a ):
_A : List[Any] = output_channel
_A : Tuple = reversed_block_out_channels[i]
_A : Optional[Any] = i == len(_a ) - 1
_A : Optional[int] = get_up_block(
_a , num_layers=self.layers_per_block + 1 , in_channels=_a , out_channels=_a , prev_output_channel=_a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , resnet_time_scale_shift=_a , )
self.up_blocks.append(_a )
_A : str = output_channel
# out
if norm_type == "spatial":
_A : str = SpatialNorm(block_out_channels[0] , _a )
else:
_A : int = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_a , eps=1e-6 )
_A : Dict = nn.SiLU()
_A : str = nn.Convad(block_out_channels[0] , _a , 3 , padding=1 )
_A : int = False
def a__ ( self , _a , _a=None ) -> Tuple:
_A : Union[str, Any] = z
_A : int = self.conv_in(_a )
_A : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_a ):
def custom_forward(*_a ):
return module(*_a )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
_A : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , _a , use_reentrant=_a )
_A : List[Any] = sample.to(_a )
# up
for up_block in self.up_blocks:
_A : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) , _a , _a , use_reentrant=_a )
else:
# middle
_A : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , _a )
_A : int = sample.to(_a )
# up
for up_block in self.up_blocks:
_A : int = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a , _a )
else:
# middle
_A : Any = self.mid_block(_a , _a )
_A : Union[str, Any] = sample.to(_a )
# up
for up_block in self.up_blocks:
_A : Union[str, Any] = up_block(_a , _a )
# post-process
if latent_embeds is None:
_A : Any = self.conv_norm_out(_a )
else:
_A : Union[str, Any] = self.conv_norm_out(_a , _a )
_A : str = self.conv_act(_a )
_A : Tuple = self.conv_out(_a )
return sample
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a=None , _a="random" , _a=False , _a=True ) -> Any:
super().__init__()
_A : Tuple = n_e
_A : Optional[Any] = vq_embed_dim
_A : Tuple = beta
_A : Dict = legacy
_A : List[str] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_A : Tuple = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
_A : int = self.used.shape[0]
_A : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_A : Optional[int] = self.re_embed
_A : Optional[Any] = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_A : Dict = n_e
_A : Dict = sane_index_shape
def a__ ( self , _a ) -> int:
_A : Tuple = inds.shape
assert len(_a ) > 1
_A : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_A : Union[str, Any] = self.used.to(_a )
_A : Optional[Any] = (inds[:, :, None] == used[None, None, ...]).long()
_A : int = match.argmax(-1 )
_A : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_A : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_A : int = self.unknown_index
return new.reshape(_a )
def a__ ( self , _a ) -> Dict:
_A : Tuple = inds.shape
assert len(_a ) > 1
_A : List[Any] = inds.reshape(ishape[0] , -1 )
_A : Union[str, Any] = self.used.to(_a )
if self.re_embed > self.used.shape[0]: # extra token
_A : str = 0 # simply set to zero
_A : Any = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _a )
return back.reshape(_a )
def a__ ( self , _a ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
_A : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
_A : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_A : Tuple = torch.argmin(torch.cdist(_a , self.embedding.weight ) , dim=1 )
_A : Any = self.embedding(_a ).view(z.shape )
_A : Optional[Any] = None
_A : Any = None
# compute loss for embedding
if not self.legacy:
_A : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_A : Any = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_A : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_A : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_A : str = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_A : int = self.remap_to_used(_a )
_A : List[Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_A : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def a__ ( self , _a , _a ) -> List[str]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_A : List[str] = indices.reshape(shape[0] , -1 ) # add batch axis
_A : Optional[int] = self.unmap_to_all(_a )
_A : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_A : str = self.embedding(_a )
if shape is not None:
_A : int = z_q.view(_a )
# reshape back to match original input shape
_A : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a , _a=False ) -> Any:
_A : Dict = parameters
_A , _A : Tuple = torch.chunk(_a , 2 , dim=1 )
_A : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
_A : Tuple = deterministic
_A : Any = torch.exp(0.5 * self.logvar )
_A : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
_A : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def a__ ( self , _a = None ) -> torch.FloatTensor:
        # make sure the sample is on the same device as the parameters and has the same dtype
_A : Union[str, Any] = randn_tensor(
self.mean.shape , generator=_a , device=self.parameters.device , dtype=self.parameters.dtype )
_A : int = self.mean + self.std * sample
return x
def a__ ( self , _a=None ) -> Tuple:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def a__ ( self , _a , _a=[1, 2, 3] ) -> Optional[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
_A : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_a )
def a__ ( self ) -> str:
return self.mean
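# A minimal numpy sketch (hypothetical helper) of the closed-form KL term the
# distribution class above computes against a standard normal:
# KL(N(mu, sigma^2) || N(0, 1)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2).
import numpy as np

def _kl_to_standard_normal_sketch(mean: np.ndarray, logvar: np.ndarray) -> float:
    return float(0.5 * np.sum(np.power(mean, 2) + np.exp(logvar) - 1.0 - logvar))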
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( snake_case_ ):
return data[1:] + data[0]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = int("""0b""" + data[0] + data[-1],2 )
_A : Any = int("""0b""" + data[1:3],2 )
return bin(s[row][col] )[2:]
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[str] = message[:4]
_A : List[Any] = message[4:]
_A : Union[str, Any] = apply_table(snake_case_,snake_case_ )
_A : List[Any] = xor(snake_case_,snake_case_ )
_A : Optional[Any] = apply_sbox(snake_case_,temp[:4] ) # noqa: E741
_A : List[Any] = apply_sbox(snake_case_,temp[4:] )
_A : int = """0""" * (2 - len(snake_case_ )) + l # noqa: E741
_A : Union[str, Any] = """0""" * (2 - len(snake_case_ )) + r
_A : List[Any] = apply_table(l + r,snake_case_ )
_A : Any = xor(snake_case_,snake_case_ )
return temp + right
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def a__ ( *_a , **_a ) -> Union[str, Any]:
pass
def lowerCAmelCase_ ( snake_case_ ):
_A : Dict = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def a__ ( self , _a , _a , _a ) -> Tuple:
_A : Any = DepthEstimationPipeline(model=_a , image_processor=_a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def a__ ( self , _a , _a ) -> str:
_A : List[Any] = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , _a )
import datasets
_A : int = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
_A : str = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
] , _a , )
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def a__ ( self ) -> str:
pass
@slow
@require_torch
def a__ ( self ) -> Optional[Any]:
_A : Tuple = """Intel/dpt-large"""
_A : Dict = pipeline("""depth-estimation""" , model=_a )
_A : Any = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
_A : Dict = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 )
@require_torch
def a__ ( self ) -> int:
        # It is highly irregular to have no small tests.
        self.skipTest("""There is no hf-internal-testing tiny model for either GLPN or DPT""" )
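# A hedged usage sketch of the pipeline exercised above (model name and image
# URL as in the slow test; output keys per the assertions above):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# result["depth"] is a PIL image; result["predicted_depth"] is a torch.Tensor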
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
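# A hedged usage sketch of the processor under test (the size dict mirrors the
# tester defaults above; names are the public transformers API):
# from transformers import DPTImageProcessor
# processor = DPTImageProcessor(size={"height": 18, "width": 18})
# pixel_values = processor(images, return_tensors="pt").pixel_values
# # -> shape (batch_size, num_channels, 18, 18)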
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Any = {"""Content-Type""": """application/json"""}
_A : Dict = requests.post(snake_case_,json={"""text""": message_body},headers=snake_case_ )
if response.status_code != 200:
_A : Any = (
"""Request to slack returned an error """
f'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(snake_case_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a=None , _a=None , *_a , **_a ) -> Optional[int]:
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_A : Optional[Any] = self.model.config
else:
_A : int = config
_A : Optional[Any] = data_args
_A : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : Union[str, Any] = label_smoothed_nll_loss
def a__ ( self , _a ) -> int:
if self.optimizer is None:
_A : List[str] = ["""bias""", """LayerNorm.weight"""]
_A : str = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
_A : Optional[Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_A : Dict = Adafactor
_A : int = {"""scale_parameter""": False, """relative_step""": False}
else:
_A : int = AdamW
_A : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
_A : List[str] = self.args.learning_rate
if self.sharded_ddp:
_A : List[str] = OSS(
params=_a , optim=_a , **_a , )
else:
_A : Tuple = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_A : Union[str, Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a__ ( self , _a ) -> Dict:
_A : List[Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_A : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_A : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_A : List[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def a__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a__ ( self , _a , _a , _a ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_A : List[str] = model(**_a , use_cache=_a )[0]
_A : Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
                # compute the usual loss via the model
_A , _A : str = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_A : Any = model(**_a , use_cache=_a )[0]
_A : Union[str, Any] = torch.nn.functional.log_softmax(_a , dim=-1 )
_A , _A : List[str] = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a__ ( self , _a , _a ) -> List[Any]:
_A : Optional[int] = inputs.pop("""labels""" )
_A , _A : Dict = self._compute_loss(_a , _a , _a )
return loss
def a__ ( self , _a , _a , _a , _a = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
_A : int = self._prepare_inputs(_a )
_A : Dict = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_A : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_A : str = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
_A : Any = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
_A , _A : Tuple = self._compute_loss(_a , _a , _a )
_A : Any = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_A : List[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_A : Any = self._pad_tensors_to_max_len(_a , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a__ ( self , _a , _a ) -> Union[str, Any]:
        # If the PAD token is not defined, at least the EOS token has to be defined
_A : Optional[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
F''' padded to `max_length`={max_length}''' )
_A : Dict = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_A : Dict = tensor
return padded_tensor
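# A hedged sketch of the fairseq-style `label_smoothed_nll_loss` imported from
# `utils` above (signature assumed; ignore-index masking omitted for brevity):
import torch

def _label_smoothed_nll_sketch(lprobs: torch.Tensor, target: torch.Tensor, epsilon: float) -> torch.Tensor:
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold class indices
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.sum(dim=-1)  # mass spread uniformly over the vocabulary
    eps_i = epsilon / lprobs.size(-1)
    return ((1.0 - epsilon) * nll_loss + eps_i * smooth_loss).sum()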
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaControlnetPipeline
_a = ["image_embeds", "negative_image_embeds", "hint"]
_a = ["image_embeds", "negative_image_embeds", "hint"]
_a = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> Any:
return 32
@property
def a__ ( self ) -> str:
return self.time_input_dim
@property
def a__ ( self ) -> Tuple:
return self.time_input_dim * 4
@property
def a__ ( self ) -> Dict:
return 100
@property
def a__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_A : Dict = {
"""in_channels""": 8,
            # out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : List[Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> List[Any]:
torch.manual_seed(0 )
_A : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> Union[str, Any]:
_A : int = self.dummy_unet
_A : Tuple = self.dummy_movq
_A : str = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_a , )
_A : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> int:
_A : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
_A : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
_A : Optional[Any] = torch.manual_seed(_a )
else:
_A : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
_A : Union[str, Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> int:
_A : Optional[Any] = """cpu"""
_A : Union[str, Any] = self.get_dummy_components()
_A : str = self.pipeline_class(**_a )
_A : Optional[int] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : Union[str, Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : List[Any] = output.images
_A : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Optional[Any] = image[0, -3:, -3:, -1]
_A : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Dict = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> Any:
_A : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
_A : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
_A : Optional[Any] = torch.from_numpy(np.array(_a ) ).float() / 255.0
_A : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_A : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : Optional[Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
_A : Optional[int] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : List[str] = """A robot, 4k photo"""
_A : Union[str, Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
_A , _A : Union[str, Any] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : Tuple = torch.Generator(device="""cuda""" ).manual_seed(0 )
_A : str = pipeline(
image_embeds=_a , negative_image_embeds=_a , hint=_a , generator=_a , num_inference_steps=100 , output_type="""np""" , )
_A : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a , _a )
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def lowerCAmelCase_ ( snake_case_ ):
# A local function to see if a dot lands in the circle.
def is_in_circle(snake_case_,snake_case_ ) -> bool:
_A : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
_A : Optional[int] = mean(
int(is_in_circle(uniform(-1.0,1.0 ),uniform(-1.0,1.0 ) ) )
for _ in range(snake_case_ ) )
# The ratio of the area for circle to square is pi/4.
_A : List[str] = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0.0,snake_case_ = 1.0,):
return mean(
function_to_integrate(uniform(snake_case_,snake_case_ ) ) for _ in range(snake_case_ ) ) * (max_value - min_value)
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = 1.0 ):
def identity_function(snake_case_ ) -> float:
return x
_A : Any = area_under_curve_estimator(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Tuple = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print("""******************""" )
def lowerCAmelCase_ ( snake_case_ ):
def function_to_integrate(snake_case_ ) -> float:
return sqrt(4.0 - x * x )
_A : Optional[int] = area_under_curve_estimator(
snake_case_,snake_case_,0.0,2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
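# Worked example of the estimator above (illustrative call, not executed here):
# area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0)
# ~= mean(U^2) * (1.0 - 0.0), which approaches the true integral 1/3 as the
# sample count grows.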
def lowerCAmelCase_ ( snake_case_ ):
return str(snake_case_ ) == str(snake_case_ )[::-1]
def lowerCAmelCase_ ( snake_case_ ):
return int(snake_case_ ) + int(str(snake_case_ )[::-1] )
def lowerCAmelCase_ ( snake_case_ = 10000 ):
_A : Dict = []
for num in range(1,snake_case_ ):
_A : Dict = 0
_A : str = num
while iterations < 50:
_A : Any = sum_reverse(snake_case_ )
iterations += 1
if is_palindrome(snake_case_ ):
break
else:
lychrel_nums.append(snake_case_ )
return len(snake_case_ )
if __name__ == "__main__":
print(f"""{solution() = }""")
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : int = tempfile.mkdtemp()
_A : Union[str, Any] = 8
# DPR tok
_A : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
_A : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_A : Optional[Any] = dict(zip(_a , range(len(_a ) ) ) )
_A : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_A : Dict = {"""unk_token""": """<unk>"""}
_A : Optional[Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
_A : List[Any] = os.path.join(_a , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def a__ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def a__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
_A : Optional[Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
_A : int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_A : Any = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_a )
rag_tokenizer.save_pretrained(_a )
_A : Optional[Any] = RagTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _a )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _a )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
_A : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
_A : Tuple = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Tuple = tokenizer(_a )
self.assertIsNotNone(_a )
@slow
def a__ ( self ) -> Dict:
_A : Dict = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
_A : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
_A : Optional[Any] = tokenizer(_a )
self.assertIsNotNone(_a )
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = checkpoint
_A : Optional[Any] = {}
_A : Optional[Any] = vae_state_dict["""encoder.conv_in.weight"""]
_A : Any = vae_state_dict["""encoder.conv_in.bias"""]
_A : int = vae_state_dict["""encoder.conv_out.weight"""]
_A : List[Any] = vae_state_dict["""encoder.conv_out.bias"""]
_A : List[str] = vae_state_dict["""encoder.norm_out.weight"""]
_A : Any = vae_state_dict["""encoder.norm_out.bias"""]
_A : Any = vae_state_dict["""decoder.conv_in.weight"""]
_A : List[Any] = vae_state_dict["""decoder.conv_in.bias"""]
_A : Union[str, Any] = vae_state_dict["""decoder.conv_out.weight"""]
_A : int = vae_state_dict["""decoder.conv_out.bias"""]
_A : Any = vae_state_dict["""decoder.norm_out.weight"""]
_A : List[str] = vae_state_dict["""decoder.norm_out.bias"""]
_A : Any = vae_state_dict["""quant_conv.weight"""]
_A : str = vae_state_dict["""quant_conv.bias"""]
_A : List[Any] = vae_state_dict["""post_quant_conv.weight"""]
_A : Any = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
_A : Any = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
_A : Union[str, Any] = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(snake_case_ )
}
# Retrieves the keys for the decoder up blocks only
_A : List[Any] = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
_A : Tuple = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(snake_case_ )
}
for i in range(snake_case_ ):
_A : Optional[int] = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_A : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_A : int = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_A : Dict = renew_vae_resnet_paths(snake_case_ )
_A : Tuple = {"""old""": f'''down.{i}.block''', """new""": f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ )
_A : Tuple = [key for key in vae_state_dict if """encoder.mid.block""" in key]
_A : List[str] = 2
for i in range(1,num_mid_res_blocks + 1 ):
_A : Dict = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_A : Any = renew_vae_resnet_paths(snake_case_ )
_A : Tuple = {"""old""": f'''mid.block_{i}''', """new""": f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ )
_A : List[Any] = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
_A : List[str] = renew_vae_attention_paths(snake_case_ )
_A : str = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ )
conv_attn_to_linear(snake_case_ )
for i in range(snake_case_ ):
_A : Dict = num_up_blocks - 1 - i
_A : Any = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_A : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_A : Union[str, Any] = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_A : List[str] = renew_vae_resnet_paths(snake_case_ )
_A : Union[str, Any] = {"""old""": f'''up.{block_id}.block''', """new""": f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ )
_A : List[str] = [key for key in vae_state_dict if """decoder.mid.block""" in key]
_A : str = 2
for i in range(1,num_mid_res_blocks + 1 ):
_A : Dict = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_A : List[Any] = renew_vae_resnet_paths(snake_case_ )
_A : Any = {"""old""": f'''mid.block_{i}''', """new""": f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ )
_A : str = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
_A : List[str] = renew_vae_attention_paths(snake_case_ )
_A : Union[str, Any] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(snake_case_,snake_case_,snake_case_,additional_replacements=[meta_path],config=snake_case_ )
conv_attn_to_linear(snake_case_ )
return new_checkpoint
def lowerCAmelCase_ ( snake_case_,snake_case_,):
    # Only supports v1 checkpoints
    _A : str = requests.get(
        """https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
_A : Any = io.BytesIO(r.content )
_A : Optional[Any] = OmegaConf.load(snake_case_ )
_A : Tuple = 512
_A : str = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
_A : int = {}
with safe_open(snake_case_,framework="""pt""",device="""cpu""" ) as f:
for key in f.keys():
_A : Optional[Any] = f.get_tensor(snake_case_ )
else:
_A : str = torch.load(snake_case_,map_location=snake_case_ )["""state_dict"""]
# Convert the VAE model.
_A : int = create_vae_diffusers_config(snake_case_,image_size=snake_case_ )
_A : Optional[int] = custom_convert_ldm_vae_checkpoint(snake_case_,snake_case_ )
_A : List[str] = AutoencoderKL(**snake_case_ )
vae.load_state_dict(snake_case_ )
vae.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
_snake_case = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
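# A minimal usage sketch for this script (the script and file names below are
# hypothetical):
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers
#
# The converted weights can then be reloaded directly:
#
#   from diffusers import AutoencoderKL
#   vae = AutoencoderKL.from_pretrained("./vae-diffusers")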
| 54
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_A : str = self.transformer_dir
shutil.copy(
os.path.join(_a , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def a__ ( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def a__ ( self , _a , _a , _a , _a=None ) -> Optional[Any]:
_A : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_A : List[str] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_A : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_A : Optional[int] = black.format_str(_a , mode=_a )
_A : Optional[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_a , """w""" , newline="""\n""" ) as f:
f.write(_a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_a )
with open(_a , """r""" ) as f:
self.assertTrue(f.read() , _a )
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
        # Check that the number of models matches the English README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
| 54
| 1
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def a__ ( *_a , **_a ) -> Optional[Any]:
pass
@is_pipeline_test
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def a__ ( self , _a , _a , _a ) -> int:
_A : Any = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_A : Optional[int] = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[str] = vqa_pipeline(_a , top_k=1 )
self.assertEqual(
_a , [
[{"""score""": ANY(_a ), """answer""": ANY(_a )}],
[{"""score""": ANY(_a ), """answer""": ANY(_a )}],
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : Optional[int] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_A : Dict = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_A : Tuple = """How many cats are there?"""
_A : List[str] = vqa_pipeline(image=_a , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
_a , [{"""score""": ANY(_a ), """answer""": ANY(_a )}, {"""score""": ANY(_a ), """answer""": ANY(_a )}] )
_A : Optional[int] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
_a , [{"""score""": ANY(_a ), """answer""": ANY(_a )}, {"""score""": ANY(_a ), """answer""": ANY(_a )}] )
@slow
@require_torch
def a__ ( self ) -> Dict:
_A : Optional[Any] = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
_A : Optional[int] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_A : List[str] = """How many cats are there?"""
_A : List[str] = vqa_pipeline(image=_a , question=_a , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
_A : Optional[Any] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
_A : Union[str, Any] = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def a__ ( self ) -> Optional[int]:
pass
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
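# Illustrative sketch of the helper above: floats_list((2, 3)) yields a 2-row,
# 3-column nested list of floats scaled by `scale`, drawn from `global_rng`
# when no rng is passed (the values below are made up for illustration):
#
#   floats_list((2, 3))  # -> e.g. [[0.46, 0.19, 0.77], [0.30, 0.58, 0.04]]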
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 54
| 1
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = MgpstrTokenizer
_a = False
_a = {}
_a = False
def a__ ( self ) -> str:
super().setUp()
# fmt: off
_A : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_A : List[str] = dict(zip(_a , range(len(_a ) ) ) )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
def a__ ( self , **_a ) -> Optional[Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
_A : str = """tester"""
_A : Union[str, Any] = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def a__ ( self ) -> Optional[Any]:
pass
def a__ ( self ) -> Tuple:
_A : Optional[Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A : int = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_A : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_A : Optional[int] = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def a__ ( self ) -> List[Any]:
_A : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A , _A : Any = self.get_input_output_texts(_a )
_A : str = tokenizer.tokenize(_a )
_A : List[Any] = tokenizer.convert_tokens_to_ids(_a )
_A : Union[str, Any] = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_A : List[Any] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_A : Optional[Any] = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(""" """ , """""" ) , _a )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def a__ ( self ) -> Any:
pass
| 54
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowercase ( UpperCamelCase__ ):
_a = "mctct"
def __init__( self , _a=8065 , _a=1536 , _a=36 , _a=6144 , _a=4 , _a=384 , _a=920 , _a=1e-5 , _a=0.3 , _a="relu" , _a=0.02 , _a=0.3 , _a=0.3 , _a=1 , _a=0 , _a=2 , _a=1 , _a=0.3 , _a=1 , _a=(7,) , _a=(3,) , _a=80 , _a=1 , _a=None , _a="sum" , _a=False , **_a , ) -> List[str]:
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
_A : Dict = vocab_size
_A : Tuple = hidden_size
_A : Optional[int] = num_hidden_layers
_A : List[str] = intermediate_size
_A : int = num_attention_heads
_A : Union[str, Any] = attention_head_dim
_A : Tuple = max_position_embeddings
_A : int = layer_norm_eps
_A : int = layerdrop
_A : Optional[int] = hidden_act
_A : List[Any] = initializer_range
_A : Optional[int] = hidden_dropout_prob
_A : Union[str, Any] = attention_probs_dropout_prob
_A : str = pad_token_id
_A : int = bos_token_id
_A : int = eos_token_id
_A : List[str] = conv_glu_dim
_A : Optional[int] = conv_dropout
_A : Any = num_conv_layers
_A : Optional[Any] = input_feat_per_channel
_A : List[Any] = input_channels
_A : List[str] = conv_channels
_A : int = ctc_loss_reduction
_A : Optional[Any] = ctc_zero_infinity
        # cast to lists so the config can be exported to JSON without the config test failing
_A : str = list(_a )
_A : Union[str, Any] = list(_a )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
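# A minimal sketch of the consistency guard above (`MCTCTConfig` is the name this
# config carries upstream, per the "mctct" model_type; kwarg values are illustrative):
#
#   config = MCTCTConfig(conv_kernel=(7,), num_conv_layers=1)    # OK: lengths match
#   config = MCTCTConfig(conv_kernel=(7, 7), num_conv_layers=1)  # raises ValueError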
| 54
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PriorTransformer
_a = "hidden_states"
@property
def a__ ( self ) -> str:
_A : Optional[Any] = 4
_A : List[str] = 8
_A : Optional[Any] = 7
_A : Union[str, Any] = floats_tensor((batch_size, embedding_dim) ).to(_a )
_A : Tuple = floats_tensor((batch_size, embedding_dim) ).to(_a )
_A : Any = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_a )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def a__ ( self , _a=0 ) -> Optional[Any]:
torch.manual_seed(_a )
_A : Dict = 4
_A : str = 8
_A : Optional[Any] = 7
_A : Any = torch.randn((batch_size, embedding_dim) ).to(_a )
_A : Tuple = torch.randn((batch_size, embedding_dim) ).to(_a )
_A : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_a )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def a__ ( self ) -> Union[str, Any]:
return (4, 8)
@property
def a__ ( self ) -> int:
return (4, 8)
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
_A : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def a__ ( self ) -> Union[str, Any]:
_A , _A : Dict = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""" , output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(_a )
_A : Optional[int] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def a__ ( self ) -> Any:
_A , _A : List[Any] = self.prepare_init_args_and_inputs_for_common()
_A : Optional[int] = self.model_class(**_a )
_A : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : Optional[int] = [*signature.parameters.keys()]
_A : List[Any] = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2] , _a )
def a__ ( self ) -> Any:
_A : Union[str, Any] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
_A : Optional[int] = model.to(_a )
if hasattr(_a , """set_default_attn_processor""" ):
model.set_default_attn_processor()
_A : Tuple = self.get_dummy_seed_input()
with torch.no_grad():
_A : Dict = model(**_a )[0]
_A : List[str] = output[0, :5].flatten().cpu()
print(_a )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
_A : Optional[int] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(_a , _a , rtol=1e-2 ) )
@slow
class lowercase ( unittest.TestCase ):
def a__ ( self , _a=1 , _a=768 , _a=77 , _a=0 ) -> List[str]:
torch.manual_seed(_a )
_A : List[str] = batch_size
_A : Optional[int] = embedding_dim
_A : List[str] = num_embeddings
_A : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(_a )
_A : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(_a )
_A : Dict = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_a )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def a__ ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def a__ ( self , _a , _a ) -> Any:
_A : List[Any] = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(_a )
_A : Optional[Any] = self.get_dummy_seed_input(seed=_a )
with torch.no_grad():
_A : Dict = model(**_a )[0]
assert list(sample.shape ) == [1, 768]
_A : Optional[Any] = sample[0, :8].flatten().cpu()
print(_a )
_A : Tuple = torch.tensor(_a )
assert torch_all_close(_a , _a , atol=1e-3 )
| 54
|
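# Derivation sketch for the Project Euler-style search below (assuming the
# arithmetic-progression substitution x = a + d, y = a, z = a - d):
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a) = n
# Solving for d gives 4 * d = a + n / a, which is why the inner loop forms
# `common_difference = first_term + n / first_term`, keeps it only when it is
# divisible by 4, and then requires a > d (so z > 0) and a < 4 * d (so n > 0).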
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # x, y, z must be positive integers
                    frequency[n] += 1  # z > 0 requires a > d, and n > 0 requires a < 4d
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
| 1
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
_snake_case = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
_snake_case = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def lowerCAmelCase_ ( snake_case_ ):
_A : int = (images / 2 + 0.5).clamp(0,1 )
_A : List[Any] = images.cpu().permute(0,2,3,1 ).float().numpy()
_A : List[str] = numpy_to_pil(snake_case_ )
return images
def lowerCAmelCase_ ( snake_case_ ):
if images.ndim == 3:
_A : List[str] = images[None, ...]
_A : int = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
_A : str = [Image.fromarray(image.squeeze(),mode="""L""" ) for image in images]
else:
_A : Tuple = [Image.fromarray(snake_case_ ) for image in images]
return pil_images
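# A minimal usage sketch chaining the two helpers above (the first helper is
# named `pt_to_pil` upstream in diffusers; tensor layout is assumed to be
# (batch, channels, height, width) with values in [-1, 1]):
#
#   import torch
#   batch = torch.rand(2, 3, 64, 64) * 2 - 1   # two fake RGB images in [-1, 1]
#   pil_images = pt_to_pil(batch)              # denormalize, to numpy, then to PIL
#   pil_images[0].save("sample.png")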
| 54
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
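# Behavior sketch: _LazyModule defers the heavy import until first attribute
# access, e.g.
#
#   from transformers.models import wav2vec2_phoneme     # cheap; nothing loaded yet
#   wav2vec2_phoneme.Wav2Vec2PhonemeCTCTokenizer         # triggers the submodule import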
| 54
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase ( UpperCamelCase__ ):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        resample = PILImageResampling.BICUBIC,
        do_center_crop = True,
        crop_size = None,
        do_rescale = True,
        rescale_factor = 1 / 255,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        do_convert_rgb = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        resample = None,
        do_center_crop = None,
        crop_size = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        do_convert_rgb = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
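# Minimal usage sketch (not part of the original file; assumes PIL is installed
# and that `lowercase` above is the exported CLIP-style image processor):
#
#     from PIL import Image
#     processor = lowercase()
#     batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#     batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the defaults
#
# `__call__` comes from the image-processor base class and forwards to
# `preprocess`, so the steps run as resize -> center-crop -> rescale -> normalize.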
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
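# Note: at runtime the module object is replaced by a _LazyModule, so importing
# this package never pulls in torch or TensorFlow eagerly; the submodules listed
# in _import_structure are only imported on first attribute access.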
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
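# Worked example of the "SAME" padding arithmetic above: for a 7x7 input with a
# 3x3 kernel and stride 2, 7 % 2 == 1, so pad_along = max(3 - 1, 0) = 2, split
# as (pad_top, pad_bottom) = (1, 1); the output is then ceil(7 / 2) = 4 pixels
# per side, matching TensorFlow's padding="SAME" behavior.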
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride = 1,
        groups = 1,
        bias = False,
        use_normalization = True,
        use_activation = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config, add_pooling_layer = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # 3x3 depthwise convolution (one filter per input channel)
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # 1x1 pointwise convolution mixing the channels
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
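# Minimal usage sketch (not part of the original file; checkpoint taken from
# MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST, `image` is any PIL image):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, num_labels)
#     print(model.config.id2label[logits.argmax(-1).item()])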
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands are pushed onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators are pushed onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, pop one operator and two
            # operands, apply the operator and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)
        # RULE 3: opening parentheses are simply skipped

    # RULE 5: the single value left on the operand stack is the result
    return operand_stack.peek()
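# Trace for "(5 + ((4 * 2) * (2 + 3)))": the first ")" reduces 4 * 2 -> 8, the
# next reduces 2 + 3 -> 5, the next reduces 8 * 5 -> 40, and the outer ")"
# reduces 5 + 40 -> 45, which is what RULE 5 finally reads off the operand stack.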
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
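# e.g. compare_string("0010", "0110") -> "0_10" (the strings differ only in the
# second bit, so the pair of minterms can be merged), while
# compare_string("0110", "1101") -> False (three differing bits, no merge).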
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
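# Column j of `chart` is a minterm and row i a prime implicant. The first pass
# keeps every implicant that is the sole cover of some minterm (essential); the
# greedy loop then repeatedly takes the implicant covering the most
# still-uncovered minterms until the chart is exhausted.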
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract the start/end times and duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
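# The GitHub API caps `per_page` at 100, so with e.g. total_count == 250 the
# code above fetches ceil((250 - 100) / 100) = 2 extra pages (&page=2, &page=3).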
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
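# table[i] holds every decomposition of the prefix target[:i]; whenever a word
# matches at position i, each of those decompositions is extended and pushed to
# table[i + len(word)], so table[len(target)] ends up with all full constructions.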
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
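# Each pass peels an increasing "strand" off the remaining input and merges it
# into the solution, recursing until the input is empty: linear on already
# sorted input, O(n^2) in the worst case (reverse-sorted input, n strands).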
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
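# REFERENCE_CODE mirrors BertLMPredictionHead so these tests can exercise
# check_copies: roughly, any block annotated "# Copied from <module>.<object>"
# must stay identical to its source (after the optional `with A->B` renames),
# and is_copy_consistent(..., overwrite=True) rewrites a drifted copy in place.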
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        outputs = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(outputs, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
@require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_loading_old_tokenizer_raises_error(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith("The `backend_tokenizer` provided does not match the expected format.")
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
import numpy as np
def sigmoid(vector):
    # Logistic function: 1 / (1 + e^-x), applied element-wise.
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector):
    # SiLU / swish activation: x * sigmoid(x).
    return vector * sigmoid(vector)
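# A minimal usage sketch (example values assumed for illustration, not part of
# the original module):
# >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
# array([0.26894142, 0.73105858, 0.88079708])
# >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
# array([-0.26894142,  0.73105858,  1.76159416])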
if __name__ == "__main__":
import doctest
doctest.testmod()
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0) -> None:
        # Private key used whenever no per-call key is supplied.
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
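# Note (added for illustration): XOR with the same key is its own inverse.
# With key 67, "h" (ord 104) encrypts to chr(104 ^ 67) == "+" (43), and
# 43 ^ 67 == 104 decrypts back to "h", so decrypt(encrypt(text)) round-trips.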
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
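# A minimal usage sketch (example grid assumed for illustration; class and
# method names as restored above):
# graph = [
#     [1, 1, 0, 0, 0],
#     [0, 1, 0, 0, 1],
#     [1, 0, 0, 1, 1],
#     [0, 0, 0, 0, 0],
#     [1, 0, 1, 0, 1],
# ]
# Graph(5, 5, graph).count_islands()  # -> 5 under 8-way connectivity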
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
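# A short sketch of the helper's behaviour (example shape assumed for
# illustration): floats_list((2, 3)) returns a 2x3 nested list of floats drawn
# from [0.0, 1.0) by the module-level global_rng.
# batch = floats_list((2, 3))
# assert len(batch) == 2 and len(batch[0]) == 3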
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    # Index (0-based, from the right) of the lowest set bit; 0 for input 0.
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if number == 0 else int(log2(number & -number))
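# Worked examples (calls added for illustration): 36 is 0b100100, so
# 36 & -36 == 4 and log2(4) == 2; any odd number has bit 0 set.
# >>> get_index_of_rightmost_set_bit(36)
# 2
# >>> get_index_of_rightmost_set_bit(9)
# 0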
if __name__ == "__main__":
import doctest
doctest.testmod()
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_A : Optional[int] = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_A : Any = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_A : Any = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_A : Any = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_A : str = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_A : int = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_A : Union[str, Any] = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
_A : int = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError("""Unknown model name""" )
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving feature extractor to {pytorch_dump_folder_path}''')
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f'''MIT/{model_name}''')
        feature_extractor.push_to_hub(f'''MIT/{model_name}''')
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **calc_rouge_kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **calc_rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
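# A hedged usage sketch (file names are hypothetical): fire exposes the
# function as a CLI, e.g.
#   python rouge_cli.py preds.txt targets.txt --save_path rouge.json
# which is equivalent to
#   calculate_rouge_path("preds.txt", "targets.txt", save_path="rouge.json")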
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
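# For illustration (values assumed, not part of the original file): with
# `--lr_scheduler constant_w_warmup --warmup_steps 500`, the mapping above
# resolves to get_constant_schedule_with_warmup(optimizer, num_warmup_steps=500).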
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self, model, inputs, prediction_loss_only, ignore_keys=None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
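    # Worked example (shapes assumed for illustration): padding a (2, 5) tensor
    # of generated ids to max_length=8 returns a (2, 8) tensor whose last three
    # columns are filled with pad_token_id.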
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
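# Worked example (input assumed for illustration): pigeon_sort([8, 3, 2, 7, 4])
# spans the range [2, 8] (holes_range = 7), drops each value into its hole,
# and reads the holes back in order to yield [2, 3, 4, 7, 8].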
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input("Enter numbers separated by comma:\n")
_snake_case = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_A : List[str] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(_a , exist_ok=_a )
_A : str = os.path.join(_a , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
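    # Note (added for illustration): Funnel assigns token_type_id 2 to the
    # leading <cls> token, 0 to the first segment and 1 to the second, which is
    # exactly what the two assertions above encode.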
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
def a__ ( self ) -> str:
_A : Union[str, Any] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_A : str = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_A , _A : Tuple = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
self.assertFalse(_a )
self.assertEqual(_a , _a )
_A , _A : List[str] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check that the number of models matches the number in README.md after conversion.
self.assertTrue(_a )
_A : Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_A : Dict = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A : Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_A , _A : Optional[int] = check_copies.convert_to_localized_md(
_a , _a , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_a , _a )
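# For context, the marker these tests exercise looks like this in the transformers
# codebase (a hedged illustration, not taken from the file above):
#
#     # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
#     class TestModelLMPredictionHead(nn.Module):
#         ...
#
# check_copies.is_copy_consistent(filename) re-derives the expected body from the
# referenced source (applying the Bert->TestModel rename) and reports any drift;
# calling it with overwrite=True rewrites the stale copy in place, which is what
# check_copy_consistency asserts above.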
| 54
| 1
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A , _A : Union[str, Any] = position
_A : Dict = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
_A : int = []
for position in positions:
_A , _A : Optional[int] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(snake_case_ )
return permissible_positions
def lowerCAmelCase_ ( snake_case_ ):
return not any(elem == 0 for row in board for elem in row )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if is_complete(snake_case_ ):
return True
for position in get_valid_pos(snake_case_,len(snake_case_ ) ):
_A , _A : List[Any] = position
if board[y][x] == 0:
_A : Tuple = curr + 1
if open_knight_tour_helper(snake_case_,snake_case_,curr + 1 ):
return True
_A : int = 0
return False
def lowerCAmelCase_ ( snake_case_ ):
_A : int = [[0 for i in range(snake_case_ )] for j in range(snake_case_ )]
for i in range(snake_case_ ):
for j in range(snake_case_ ):
_A : int = 1
if open_knight_tour_helper(snake_case_,(i, j),1 ):
return board
_A : List[str] = 0
_A : List[str] = f'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
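# A minimal usage sketch, assuming the upstream names behind the obfuscated
# definitions above (the entry point is open_knight_tour(n), which fills an
# n x n board with the visit order 1..n*n or raises ValueError when no open
# tour exists):
#
#     board = open_knight_tour(5)
#     for row in board:
#         print(row)        # each cell holds the step at which the knight visited it
#
#     open_knight_tour(2)   # raises ValueError: no tour fits on a 2 x 2 board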
| 54
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=2048 , _a=128 , _a=1 , _a=512 , _a=30 , _a=4_4100 , ) -> Tuple:
_A : Any = parent
_A : str = batch_size
_A : Union[str, Any] = min_seq_length
_A : int = max_seq_length
_A : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = spectrogram_length
_A : int = feature_size
_A : str = num_audio_channels
_A : Tuple = hop_length
_A : List[str] = chunk_length
_A : Union[str, Any] = sampling_rate
def a__ ( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def a__ ( self , _a=False , _a=False ) -> Optional[int]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : List[Any] = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = TvltFeatureExtractor
def a__ ( self ) -> Any:
_A : int = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Tuple = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : int = feat_extract_second.to_dict()
_A : int = dict_first.pop("""mel_filters""" )
_A : Optional[int] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> int:
_A : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Union[str, Any] = self.feature_extraction_class.from_json_file(_a )
_A : Optional[Any] = feat_extract_first.to_dict()
_A : Union[str, Any] = feat_extract_second.to_dict()
_A : List[Any] = dict_first.pop("""mel_filters""" )
_A : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
_A : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_A : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_A : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_A : str = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_A : Optional[Any] = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test that 2-D numpy arrays are batched.
_A : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Optional[int] = np.asarray(_a )
_A : Optional[Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def a__ ( self , _a ) -> str:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Dict = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Optional[Any]:
_A : List[str] = self._load_datasamples(1 )
_A : List[str] = TvltFeatureExtractor()
_A : List[Any] = feature_extractor(_a , return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
_A : int = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 54
| 1
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowercase ( unittest.TestCase ):
@require_torch
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
_A : str = load_dataset("""ashraq/esc50""" )
_A : Optional[Any] = dataset["""train"""]["""audio"""][-1]["""array"""]
_A : List[Any] = audio_classifier(_a , candidate_labels=["""Sound of a dog""", """Sound of vacuum cleaner"""] )
self.assertEqual(
nested_simplify(_a ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vacuum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def a__ ( self ) -> int:
pass
@slow
@require_torch
def a__ ( self ) -> int:
_A : Any = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio clip of a dog
_A : Tuple = load_dataset("""ashraq/esc50""" )
_A : Union[str, Any] = dataset["""train"""]["""audio"""][-1]["""array"""]
_A : str = audio_classifier(_a , candidate_labels=["""Sound of a dog""", """Sound of vacuum cleaner"""] )
self.assertEqual(
nested_simplify(_a ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
_A : Optional[int] = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vacuum cleaner"""] )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
_A : str = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vacuum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def a__ ( self ) -> List[str]:
pass
| 54
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
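# The module above follows the transformers lazy-import pattern: _import_structure
# maps submodules to their exported names, and at runtime sys.modules[__name__] is
# replaced by a _LazyModule that resolves attributes on first access, while the
# TYPE_CHECKING branch gives static type checkers real imports. A hedged sketch of
# the effect:
#
#     from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#     # cheap: only the configuration module is imported; the torch-backed modeling
#     # classes are pulled in the first time one of them is accessed.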
| 54
| 1
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if b == 0:
return (1, 0)
((_A) , (_A)) : Optional[Any] = extended_euclid(snake_case_,a % b )
_A : Optional[Any] = a // b
return (y, x - k * y)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
((_A) , (_A)) : Tuple = extended_euclid(snake_case_,snake_case_ )
_A : str = na * na
_A : Tuple = ra * x * na + ra * y * na
return (n % m + m) % m
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
((_A) , (_A)) : Tuple = extended_euclid(snake_case_,snake_case_ )
if b < 0:
_A : List[str] = (b % n + n) % n
return b
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
_A , _A : List[str] = invert_modulo(snake_case_,snake_case_ ), invert_modulo(snake_case_,snake_case_ )
_A : Union[str, Any] = na * na
_A : Optional[int] = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 54
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
| 1
|
def lowerCAmelCase_ ( snake_case_ = 10,snake_case_ = 22 ):
_A : List[str] = range(1,snake_case_ )
_A : str = range(1,snake_case_ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
| 54
|
def lowerCAmelCase_ ( snake_case_ = 1000000 ):
_A : Any = limit + 1
_A : Tuple = [0] * limit
for first_term in range(1,snake_case_ ):
for n in range(snake_case_,snake_case_,snake_case_ ):
_A : Optional[int] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x, y, z are positive integers
frequency[n] += 1 # z > 0 forces a > d, and n > 0 forces a < 4 * d
_A : List[str] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowercase ( UpperCamelCase__ ):
_a = "microsoft/speecht5_tts"
_a = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_a = "text_reader"
_a = SpeechTaProcessor
_a = SpeechTaForTextToSpeech
_a = SpeechTaHifiGan
_a = ["text"]
_a = ["audio"]
def a__ ( self ) -> str:
if self.post_processor is None:
_A : List[str] = """microsoft/speecht5_hifigan"""
super().setup()
def a__ ( self , _a , _a=None ) -> Optional[Any]:
_A : Union[str, Any] = self.pre_processor(text=_a , return_tensors="""pt""" , truncation=_a )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_A : Optional[int] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_A : Union[str, Any] = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def a__ ( self , _a ) -> List[Any]:
with torch.no_grad():
return self.model.generate_speech(**_a )
def a__ ( self , _a ) -> List[str]:
with torch.no_grad():
return self.post_processor(_a ).cpu().detach()
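# A hedged usage sketch for the tool above (upstream it is exported as
# TextToSpeechTool; the names in this dump are obfuscated). With no explicit
# speaker embedding, encode() falls back to x-vector 7305 of the
# Matthijs/cmu-arctic-xvectors dataset:
#
#     tool = TextToSpeechTool()
#     waveform = tool("Hello, how are you today?")   # torch waveform from the vocoder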
| 54
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowercase ( tf.keras.layers.Layer ):
def __init__( self , _a , _a , _a = None , _a = None ) -> Any:
super().__init__()
_A : Dict = pad_token_id
_A : List[Any] = max_length
_A : Optional[int] = vocab
_A : Optional[int] = merges
_A : Optional[int] = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> str:
_A : Any = [""" """.join(_a ) for m in tokenizer.bpe_ranks.keys()]
_A : str = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def a__ ( cls , _a , *_a , **_a ) -> List[Any]:
_A : Union[str, Any] = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def a__ ( cls , _a ) -> Union[str, Any]:
return cls(**_a )
def a__ ( self ) -> Union[str, Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ ( self , _a , _a = None ) -> int:
_A : Optional[int] = self.tf_tokenizer(_a )
_A : Tuple = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
_A : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_A , _A : Dict = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 1
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_snake_case = logging.get_logger(__name__)
# General docstring
_snake_case = "PoolFormerConfig"
# Base docstring
_snake_case = "sail/poolformer_s12"
_snake_case = [1, 512, 7, 7]
# Image classification docstring
_snake_case = "sail/poolformer_s12"
_snake_case = "tabby, tabby cat"
_snake_case = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCAmelCase_ ( snake_case_,snake_case_ = 0.0,snake_case_ = False ):
if drop_prob == 0.0 or not training:
return input
_A : List[str] = 1 - drop_prob
_A : Optional[int] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_A : str = keep_prob + torch.rand(snake_case_,dtype=input.dtype,device=input.device )
random_tensor.floor_() # binarize
_A : List[str] = input.div(snake_case_ ) * random_tensor
return output
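# Note on the function above (stochastic depth applied per sample): dividing the
# surviving activations by keep_prob preserves the expected value of the output.
# A hedged sketch, assuming the upstream name drop_path:
#
#     x = torch.ones(8, 16, 7, 7)
#     out = drop_path(x, drop_prob=0.5, training=True)
#     # each sample in `out` is now either all zeros or all twos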
class lowercase ( nn.Module ):
def __init__( self , _a = None ) -> None:
super().__init__()
_A : List[str] = drop_prob
def a__ ( self , _a ) -> torch.Tensor:
return drop_path(_a , self.drop_prob , self.training )
def a__ ( self ) -> str:
return "p={}".format(self.drop_prob )
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a , _a , _a=None ) -> List[str]:
super().__init__()
_A : int = patch_size if isinstance(_a , collections.abc.Iterable ) else (patch_size, patch_size)
_A : Optional[Any] = stride if isinstance(_a , collections.abc.Iterable ) else (stride, stride)
_A : Dict = padding if isinstance(_a , collections.abc.Iterable ) else (padding, padding)
_A : Optional[Any] = nn.Convad(_a , _a , kernel_size=_a , stride=_a , padding=_a )
_A : str = norm_layer(_a ) if norm_layer else nn.Identity()
def a__ ( self , _a ) -> List[Any]:
_A : Any = self.projection(_a )
_A : List[Any] = self.norm(_a )
return embeddings
class lowercase ( nn.GroupNorm ):
def __init__( self , _a , **_a ) -> List[Any]:
super().__init__(1 , _a , **_a )
class lowercase ( nn.Module ):
def __init__( self , _a ) -> int:
super().__init__()
_A : Union[str, Any] = nn.AvgPoolad(_a , stride=1 , padding=pool_size // 2 , count_include_pad=_a )
def a__ ( self , _a ) -> Tuple:
return self.pool(_a ) - hidden_states
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a ) -> int:
super().__init__()
_A : Tuple = nn.Convad(_a , _a , 1 )
_A : Tuple = nn.Convad(_a , _a , 1 )
_A : Any = PoolFormerDropPath(_a )
if isinstance(config.hidden_act , _a ):
_A : Optional[int] = ACTaFN[config.hidden_act]
else:
_A : Optional[Any] = config.hidden_act
def a__ ( self , _a ) -> Any:
_A : Tuple = self.conva(_a )
_A : int = self.act_fn(_a )
_A : List[str] = self.drop(_a )
_A : Union[str, Any] = self.conva(_a )
_A : Optional[int] = self.drop(_a )
return hidden_states
class lowercase ( nn.Module ):
def __init__( self , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]:
super().__init__()
_A : List[str] = PoolFormerPooling(_a )
_A : int = PoolFormerOutput(_a , _a , _a , _a )
_A : Optional[Any] = PoolFormerGroupNorm(_a )
_A : Optional[Any] = PoolFormerGroupNorm(_a )
# Stochastic depth (drop path) regularizes training by randomly skipping this block per sample
_A : List[Any] = PoolFormerDropPath(_a ) if drop_path > 0.0 else nn.Identity()
_A : List[str] = config.use_layer_scale
if config.use_layer_scale:
_A : Tuple = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) , requires_grad=_a )
_A : Tuple = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) , requires_grad=_a )
def a__ ( self , _a ) -> Union[str, Any]:
if self.use_layer_scale:
_A : Optional[Any] = self.pooling(self.before_norm(_a ) )
_A : Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_A : Union[str, Any] = hidden_states + self.drop_path(_a )
_A : List[Any] = ()
_A : List[str] = self.output(self.after_norm(_a ) )
_A : Optional[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_A : List[Any] = hidden_states + self.drop_path(_a )
_A : Union[str, Any] = (output,) + outputs
return outputs
else:
_A : int = self.drop_path(self.pooling(self.before_norm(_a ) ) )
# First residual connection
_A : int = pooling_output + hidden_states
_A : Dict = ()
# Second residual connection inside the PoolFormerOutput block
_A : int = self.drop_path(self.output(self.after_norm(_a ) ) )
_A : Union[str, Any] = hidden_states + layer_output
_A : Dict = (output,) + outputs
return outputs
class lowercase ( nn.Module ):
def __init__( self , _a ) -> Optional[int]:
super().__init__()
_A : Union[str, Any] = config
# stochastic depth decay rule
_A : Union[str, Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_A : Optional[Any] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_A : Optional[int] = nn.ModuleList(_a )
# Transformer blocks
_A : Optional[Any] = []
_A : Tuple = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_A : List[Any] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_a ) )
_A : List[Any] = nn.ModuleList(_a )
def a__ ( self , _a , _a=False , _a=True ) -> Tuple:
_A : Tuple = () if output_hidden_states else None
_A : List[str] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_A , _A : Union[str, Any] = layers
# Get patch embeddings from hidden_states
_A : str = embedding_layer(_a )
# Send the embeddings through the blocks
for _, blk in enumerate(_a ):
_A : Optional[Any] = blk(_a )
_A : List[str] = layer_outputs[0]
if output_hidden_states:
_A : Any = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_a , hidden_states=_a )
class lowercase ( UpperCamelCase__ ):
_a = PoolFormerConfig
_a = "poolformer"
_a = "pixel_values"
_a = True
def a__ ( self , _a ) -> Tuple:
if isinstance(_a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def a__ ( self , _a , _a=False ) -> Union[str, Any]:
if isinstance(_a , _a ):
_A : List[str] = value
_snake_case = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> List[str]:
super().__init__(_a )
_A : Any = config
_A : Union[str, Any] = PoolFormerEncoder(_a )
# Initialize weights and apply final processing
self.post_init()
def a__ ( self ) -> str:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a__ ( self , _a = None , _a = None , _a = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
_A : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_A : Any = self.encoder(
_a , output_hidden_states=_a , return_dict=_a , )
_A : Optional[int] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_a , hidden_states=encoder_outputs.hidden_states , )
class lowercase ( nn.Module ):
def __init__( self , _a ) -> Tuple:
super().__init__()
_A : List[str] = nn.Linear(config.hidden_size , config.hidden_size )
def a__ ( self , _a ) -> Tuple:
_A : str = self.dense(_a )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
def __init__( self , _a ) -> List[Any]:
super().__init__(_a )
_A : Tuple = config.num_labels
_A : Optional[Any] = PoolFormerModel(_a )
# Final norm
_A : Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_A : str = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a__ ( self , _a = None , _a = None , _a = None , _a = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
_A : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_A : Optional[Any] = self.poolformer(
_a , output_hidden_states=_a , return_dict=_a , )
_A : Tuple = outputs[0]
_A : Any = self.classifier(self.norm(_a ).mean([-2, -1] ) )
_A : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_A : Union[str, Any] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_A : Optional[int] = """single_label_classification"""
else:
_A : List[Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
_A : Union[str, Any] = MSELoss()
if self.num_labels == 1:
_A : str = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_A : Optional[Any] = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
_A : List[str] = CrossEntropyLoss()
_A : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_A : List[Any] = BCEWithLogitsLoss()
_A : Dict = loss_fct(_a , _a )
if not return_dict:
_A : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a , logits=_a , hidden_states=outputs.hidden_states )
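# A hedged usage sketch for the classification model above (upstream names:
# AutoImageProcessor and PoolFormerForImageClassification):
#
#     processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits            # shape (batch_size, num_labels)
#     model.config.id2label[logits.argmax(-1).item()]   # e.g. "tabby, tabby cat"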
| 54
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_A : List[Any] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[Any]:
return ("This is a test", "This is a test")
def a__ ( self ) -> int:
_A : Dict = """</s>"""
_A : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def a__ ( self ) -> Dict:
_A : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(_a ) , 1103 )
def a__ ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def a__ ( self ) -> Tuple:
_A : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_A : Optional[int] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
_A : str = self._large_tokenizer
# <mask_1> masks a whole sentence while <mask_2> masks a single word
_A : Optional[int] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
_A : Union[str, Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_A : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def a__ ( self ) -> List[str]:
_A : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_A : Any = """To ensure a smooth flow of bank resolutions."""
_A : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_A : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = ["""This is going to be way too long.""" * 150, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Union[str, Any] = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : Tuple = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[Any]:
# fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_A : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ ( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def a__ ( self , **_a ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> List[str]:
return ("This is a test", "This is a test")
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
_A : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_A : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
_A : int = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
_A : Optional[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
_A : Tuple = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors="""pt""" )
_A : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def a__ ( self ) -> Dict:
_A : Optional[int] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_A : Any = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54
| 1
|
import collections
import os
import re
from pathlib import Path
_snake_case = "src/transformers"
# Matches is_xxx_available()
_snake_case = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_snake_case = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_snake_case = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_snake_case = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_snake_case = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_snake_case = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_snake_case = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_snake_case = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_snake_case = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_snake_case = re.compile(r"^\s*try:")
# Catches a line with else:
_snake_case = re.compile(r"^\s*else:")
def lowerCAmelCase_ ( snake_case_ ):
if _re_test_backend.search(snake_case_ ) is None:
return None
_A : Union[str, Any] = [b[0] for b in _re_backend.findall(snake_case_ )]
backends.sort()
return "_and_".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
with open(snake_case_,"""r""",encoding="""utf-8""",newline="""\n""" ) as f:
_A : str = f.readlines()
_A : Any = 0
while line_index < len(snake_case_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case_ ):
return None
# First grab the objects without a specific backend in _import_structure
_A : Optional[int] = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
_A : int = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case_ ):
_A : Optional[int] = _re_one_line_import_struct.search(snake_case_ ).groups()[0]
_A : Optional[Any] = re.findall(r"""\[([^\]]+)\]""",snake_case_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
_A : Union[str, Any] = _re_import_struct_key_value.search(snake_case_ )
if single_line_import_search is not None:
_A : int = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
_A : Union[str, Any] = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_A : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A : Optional[int] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
_A : Dict = lines[line_index]
if _re_import_struct_add_one.search(snake_case_ ) is not None:
objects.append(_re_import_struct_add_one.search(snake_case_ ).groups()[0] )
elif _re_import_struct_add_many.search(snake_case_ ) is not None:
_A : int = _re_import_struct_add_many.search(snake_case_ ).groups()[0].split(""", """ )
_A : Any = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_between_brackets.search(snake_case_ ) is not None:
_A : List[str] = _re_between_brackets.search(snake_case_ ).groups()[0].split(""", """ )
_A : Union[str, Any] = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_quote_object.search(snake_case_ ) is not None:
objects.append(_re_quote_object.search(snake_case_ ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
_A : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_A : Dict = []
while (
line_index < len(snake_case_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
_A : Dict = lines[line_index]
_A : Tuple = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
_A : Union[str, Any] = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case_ ):
# If the line is an if is_backend_available, we grab all objects associated.
_A : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
_A : Dict = lines[line_index]
_A : Any = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
_A : Optional[int] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
def find_duplicates(snake_case_ ):
return [k for k, v in collections.Counter(snake_case_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_A : Any = []
for key in import_dict_objects.keys():
_A : Union[str, Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_A : int = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_A : str = """base imports""" if key == """none""" else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
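# A hand-written sketch of the init layout these helpers expect (hypothetical
# object names, for illustration only):
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         _import_structure["modeling_foo"] = ["FooModel"]
#     if TYPE_CHECKING:
#         from .configuration_foo import FooConfig
#
# The parser above returns one {"none": [...], "torch": [...]} mapping per
# half of the file, and the analysis reports objects present in one half only.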
def lowerCAmelCase_ ( ):
_A : str = []
for root, _, files in os.walk(snake_case_ ):
if "__init__.py" in files:
_A : List[str] = os.path.join(snake_case_,"""__init__.py""" )
_A : Optional[int] = parse_init(snake_case_ )
if objects is not None:
_A : List[Any] = analyze_results(*snake_case_ )
if len(snake_case_ ) > 0:
_A : str = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(snake_case_ ) )
if len(snake_case_ ) > 0:
raise ValueError("""\n\n""".join(snake_case_ ) )
def lowerCAmelCase_ ( ):
_A : Any = []
for path, directories, files in os.walk(snake_case_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(snake_case_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case_ ) / folder).glob("""*.py""" ) ) ) == 0:
continue
_A : Union[str, Any] = str((Path(snake_case_ ) / folder).relative_to(snake_case_ ) )
_A : List[Any] = short_path.replace(os.path.sep,""".""" )
submodules.append(snake_case_ )
for fname in files:
if fname == "__init__.py":
continue
_A : Optional[int] = str((Path(snake_case_ ) / fname).relative_to(snake_case_ ) )
_A : str = short_path.replace(""".py""","""""" ).replace(os.path.sep,""".""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(snake_case_ )
return submodules
_snake_case = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def lowerCAmelCase_ ( ):
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_A : List[Any] = direct_transformers_import(snake_case_ )
_A : Tuple = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to find all additions and
    # (potentially re-) add them.
with open(os.path.join(snake_case_,"""__init__.py""" ),"""r""" ) as f:
_A : Optional[int] = f.read()
import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""",snake_case_ ) ) )
_A : Dict = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(snake_case_ ) > 0:
_A : Dict = """\n""".join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
f'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_snake_case = Lock()
def oe_process( position,value,l_send,r_send,lr_cv,rr_cv,result_pipe ):
    global process_lock
    # we perform 10 swap phases, one per element of the 10-element demo list
    # built in main(); after n phases an n-element list is guaranteed sorted.
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0,10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value,temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value,temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),) )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1,len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),) )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process,args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ),) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0,len(result_pipe ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10,0,-1 ) )
    print("""Initial List""" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("""Sorted List\n""" )
    print(*arr )
if __name__ == "__main__":
main()
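# For reference, a minimal single-process sketch of the same odd-even
# transposition (no pipes or locks), handy for sanity-checking the parallel
# version above:
#
#     def odd_even_transposition_sequential(arr):
#         n = len(arr)
#         for phase in range(n):
#             for i in range(phase % 2, n - 1, 2):
#                 if arr[i] > arr[i + 1]:
#                     arr[i], arr[i + 1] = arr[i + 1], arr[i]
#         return arr
#
#     assert odd_even_transposition_sequential([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]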
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Optional[int]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
def a__ ( self ) -> Any:
_A : str = self._create_example_records()
_A : List[Any] = Dataset.from_list(_a )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(_a ):
self.assertDictEqual(_a , example_records[i] )
def a__ ( self ) -> List[str]:
_A : Dict = self._create_example_records()
_A : List[str] = Dataset.from_list(_a )
_A : str = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a__ ( self ) -> str: # checks what happens with missing columns
_A : List[str] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_A : List[str] = Dataset.from_list(_a )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a__ ( self ) -> Dict: # checks if the type can be inferred from the second record
_A : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_A : str = Dataset.from_list(_a )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a__ ( self ) -> Dict:
_A : List[str] = Dataset.from_list([] )
self.assertEqual(len(_a ) , 0 )
self.assertListEqual(dset.column_names , [] )
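# A short usage sketch mirroring the missing-column behaviour tested above
# (not part of the suite):
#
#     dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
#     dset[0]  # {"col_1": 1} -- the first record fixes the column set
#     dset[1]  # {"col_1": None}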
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> int:
_A : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , """embed_dim""" ) )
self.parent.assertTrue(hasattr(_a , """num_heads""" ) )
class lowercase :
def __init__( self , _a , _a=13 , _a=64 , _a=3 , _a=[16, 48, 96] , _a=[1, 3, 6] , _a=[1, 2, 10] , _a=[7, 3, 3] , _a=[4, 2, 2] , _a=[2, 1, 1] , _a=[2, 2, 2] , _a=[False, False, True] , _a=[0.0, 0.0, 0.0] , _a=0.02 , _a=1e-12 , _a=True , _a=True , _a=2 , ) -> Optional[int]:
_A : str = parent
_A : Union[str, Any] = batch_size
_A : int = image_size
_A : Optional[int] = patch_sizes
_A : List[str] = patch_stride
_A : Any = patch_padding
_A : int = is_training
_A : List[str] = use_labels
_A : Tuple = num_labels
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : List[str] = num_heads
_A : Union[str, Any] = stride_kv
_A : Union[str, Any] = depth
_A : Dict = cls_token
_A : Dict = attention_drop_rate
_A : List[Any] = initializer_range
_A : Any = layer_norm_eps
def a__ ( self ) -> Dict:
_A : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Union[str, Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
_A : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_A : List[str] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Optional[Any]:
_A : Dict = TFCvtModel(config=_a )
_A : int = model(_a , training=_a )
_A : Union[str, Any] = (self.image_size, self.image_size)
_A , _A : Union[str, Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_A : Optional[int] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_A : Dict = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
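    # Worked example of the conv output-size formula above, using the default
    # first-stage values (image_size=64, patch_size=7, stride=4, padding=2):
    # floor((64 + 2 * 2 - 7) / 4 + 1) = floor(16.25) = 16, i.e. a 4x spatial
    # downsampling, matching the image_size // 4 used in the hidden-states
    # checks later in this file.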
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Any = self.num_labels
_A : str = TFCvtForImageClassification(_a )
_A : List[Any] = model(_a , labels=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self ) -> Tuple:
_A : List[Any] = self.prepare_config_and_inputs()
_A , _A , _A : Tuple = config_and_inputs
_A : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
_a = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = TFCvtModelTester(self )
_A : str = TFCvtConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> str:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def a__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def a__ ( self ) -> List[Any]:
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def a__ ( self ) -> str:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def a__ ( self ) -> Union[str, Any]:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def a__ ( self ) -> List[str]:
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def a__ ( self ) -> int:
_A : Any = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(_a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def a__ ( self ) -> Union[str, Any]:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = model_class(_a )
_A : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Optional[Any]:
def check_hidden_states_output(_a , _a , _a ):
_A : Union[str, Any] = model_class(_a )
_A : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[int] = outputs.hidden_states
_A : Optional[Any] = len(self.model_tester.depth )
self.assertEqual(len(_a ) , _a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_A , _A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Any = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> Dict:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Any:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = TFCvtModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def a__ ( self ) -> Tuple:
_A : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_A : Optional[Any] = self.default_image_processor
_A : Tuple = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""tf""" )
# forward pass
_A : int = model(**_a )
# verify the logits
_A : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Union[str, Any] = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _a , atol=1e-4 ) )
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( string_a,string_b ):
    lista = list(string_a )
    listb = list(string_b )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(lista )
def check( binary ):
    pi = []
    while True:
        checka = ["""$"""] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1,len(binary ) ):
                k = compare_string(binary[i],binary[j] )
                if k is not False:
                    checka[i] = """*"""
                    checka[j] = """*"""
                    temp.append(k )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary( no_of_variable,minterms ):
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table( string_a,string_b,count ):
    lista = list(string_a )
    listb = list(string_b )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection( chart,prime_implicants ):
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants,binary ):
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i],binary[j],count ):
                chart[i][j] = 1
    return chart
def main():
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        int(x )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable,minterms )
    prime_implicants = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants,binary )
    essential_prime_implicants = selection(chart,prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
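# Hand-checked trace for a tiny input (2 variables, minterms 0, 1, 3):
#   decimal_to_binary -> ["00", "01", "11"]
#   check             -> prime implicants ["0_", "_1"] (order may vary, since
#                        merged terms pass through a set)
#   selection         -> both implicants are essential: minterm 0 is covered
#                        only by "0_" and minterm 3 only by "_1".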
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
set_seed(770)
_snake_case = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
_snake_case = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
_snake_case = os.path.dirname(os.path.abspath(__file__))
_snake_case = os.path.join(os.path.expanduser("~"), ".cache")
_snake_case = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def lowerCAmelCase_ ( snake_case_,snake_case_=False ):
_A : Tuple = model_type
if use_small:
key += "_small"
return os.path.join(snake_case_,REMOTE_MODEL_PATHS[key]["""file_name"""] )
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
os.makedirs(snake_case_,exist_ok=snake_case_ )
hf_hub_download(repo_id=snake_case_,filename=snake_case_,local_dir=snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_=False,snake_case_="text" ):
if model_type == "text":
_A : List[Any] = BarkSemanticModel
_A : List[str] = BarkSemanticConfig
_A : List[str] = BarkSemanticGenerationConfig
elif model_type == "coarse":
_A : Any = BarkCoarseModel
_A : str = BarkCoarseConfig
_A : Dict = BarkCoarseGenerationConfig
elif model_type == "fine":
_A : List[Any] = BarkFineModel
_A : Union[str, Any] = BarkFineConfig
_A : Tuple = BarkFineGenerationConfig
else:
raise NotImplementedError()
_A : int = f'''{model_type}_small''' if use_small else model_type
_A : Tuple = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(snake_case_ ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""],model_info["""file_name"""] )
_A : Union[str, Any] = torch.load(snake_case_,map_location=snake_case_ )
    # this is a hack: older checkpoints expose a single `vocab_size`, which is
    # split below into the input/output vocab sizes the HF config expects
_A : Dict = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
_A : Optional[Any] = model_args["""vocab_size"""]
_A : List[str] = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_A : Dict = model_args.pop("""n_head""" )
_A : Optional[Any] = model_args.pop("""n_embd""" )
_A : Union[str, Any] = model_args.pop("""n_layer""" )
_A : List[Any] = ConfigClass(**checkpoint["""model_args"""] )
_A : Optional[Any] = ModelClass(config=snake_case_ )
_A : Optional[Any] = GenerationConfigClass()
_A : Dict = model_generation_config
_A : Tuple = checkpoint["""model"""]
# fixup checkpoint
_A : str = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(snake_case_ ):
# replace part of the key with corresponding layer name in HF implementation
_A : Union[str, Any] = k[len(snake_case_ ) :]
for old_layer_name in new_layer_name_dict:
_A : Optional[Any] = new_k.replace(snake_case_,new_layer_name_dict[old_layer_name] )
_A : Tuple = state_dict.pop(snake_case_ )
_A : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
_A : str = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
_A : Any = set(model.state_dict().keys() ) - set(state_dict.keys() )
_A : Any = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(snake_case_ ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(snake_case_ ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(snake_case_,strict=snake_case_ )
_A : int = model.num_parameters(exclude_embeddings=snake_case_ )
_A : Optional[Any] = checkpoint["""best_val_loss"""].item()
logger.info(f'''model loaded: {round(n_params/1e6,1 )}M params, {round(snake_case_,3 )} loss''' )
model.eval()
model.to(snake_case_ )
del checkpoint, state_dict
return model
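# A minimal, self-contained sketch of the key-renaming fixup performed above
# (illustrative keys only, not the full Bark mapping):
#
#     rename = {"c_attn": "att_proj", "transformer.": ""}
#     state_dict = {"_orig_mod.transformer.h.0.c_attn.weight": 0}
#     for k in list(state_dict):
#         new_k = k[len("_orig_mod."):] if k.startswith("_orig_mod.") else k
#         for old, new in rename.items():
#             new_k = new_k.replace(old, new)
#         state_dict[new_k] = state_dict.pop(k)
#     # state_dict is now {"h.0.att_proj.weight": 0}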
def lowerCAmelCase_ ( snake_case_,snake_case_=False,snake_case_="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_A : Tuple = """cpu""" # do conversion on cpu
_A : Tuple = _get_ckpt_path(snake_case_,use_small=snake_case_ )
_A : List[str] = _load_model(snake_case_,snake_case_,model_type=snake_case_,use_small=snake_case_ )
# load bark initial model
_A : int = _bark_load_model(snake_case_,"""cpu""",model_type=snake_case_,use_small=snake_case_ )
if model_type == "text":
_A : List[Any] = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=snake_case_ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
_A : List[Any] = 5
_A : List[Any] = 10
if model_type in ["text", "coarse"]:
_A : Any = torch.randint(256,(batch_size, sequence_length),dtype=torch.int )
_A : Optional[Any] = bark_model(snake_case_ )[0]
_A : Union[str, Any] = model(snake_case_ )
# take last logits
_A : Union[str, Any] = output_new_model_total.logits[:, [-1], :]
else:
_A : Tuple = 3
_A : Union[str, Any] = 8
_A : int = torch.randint(256,(batch_size, sequence_length, n_codes_total),dtype=torch.int )
_A : Union[str, Any] = model(snake_case_,snake_case_ )
_A : Dict = bark_model(snake_case_,snake_case_ )
_A : List[str] = output_new_model_total.logits
    # any output difference should come from differences in the self-attention
    # implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,snake_case_,):
_A : List[str] = os.path.join(snake_case_,snake_case_ )
_A : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(snake_case_,"""config.json""" ) )
_A : Union[str, Any] = BarkCoarseConfig.from_pretrained(os.path.join(snake_case_,"""config.json""" ) )
_A : str = BarkFineConfig.from_pretrained(os.path.join(snake_case_,"""config.json""" ) )
_A : Dict = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
_A : Optional[Any] = BarkSemanticModel.from_pretrained(snake_case_ )
_A : List[str] = BarkCoarseModel.from_pretrained(snake_case_ )
_A : Optional[Any] = BarkFineModel.from_pretrained(snake_case_ )
_A : Union[str, Any] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
_A : List[Any] = BarkConfig.from_sub_model_configs(
snake_case_,snake_case_,snake_case_,snake_case_ )
_A : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config,coarseAcoustic.generation_config,fineAcoustic.generation_config )
_A : List[str] = BarkModel(snake_case_ )
_A : Tuple = semantic
_A : Optional[int] = coarseAcoustic
_A : List[str] = fineAcoustic
_A : Optional[int] = codec
_A : List[str] = bark_generation_config
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
bark.save_pretrained(snake_case_,repo_id=snake_case_,push_to_hub=snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
_snake_case = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
from __future__ import annotations
def all_construct( target,word_bank = None ):
    word_bank = word_bank or []
    # create a table
    table_size : int = len(target ) + 1
    table : list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because the empty string has one (empty) construction
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations : list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
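# Hand-checked example of the table filling:
#   all_construct("abc", ["a", "b", "c", "ab"]) == [["ab", "c"], ["a", "b", "c"]]
# table[2] collects [["ab"], ["b", "a"]] (each built in reverse), and table[3]
# extends both with "c" before the final reversal step.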
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowercase :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> int:
_A : Optional[Any] = parent
_A : List[Any] = 13
_A : Union[str, Any] = 7
_A : List[str] = True
_A : Optional[Any] = True
_A : Optional[int] = True
_A : Optional[Any] = True
_A : List[str] = 99
_A : str = 32
_A : Dict = 2
_A : Dict = 4
_A : Union[str, Any] = 37
_A : Dict = """gelu"""
_A : Union[str, Any] = 0.1
_A : str = 0.1
_A : Tuple = 512
_A : Union[str, Any] = 16
_A : int = 2
_A : Dict = 0.02
_A : Tuple = 3
_A : List[Any] = 4
_A : Union[str, Any] = None
def a__ ( self ) -> Tuple:
_A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Optional[Any] = None
if self.use_input_mask:
_A : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_A : int = None
if self.use_token_type_ids:
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Union[str, Any] = None
_A : Dict = None
_A : int = None
if self.use_labels:
_A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_A : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]:
_A : str = TFRoFormerModel(config=A__ )
_A : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_A : Optional[Any] = [input_ids, input_mask]
_A : Optional[Any] = model(A__ )
_A : Dict = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
_A : Tuple = True
_A : str = TFRoFormerForCausalLM(config=A__ )
_A : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_A : Optional[int] = model(A__ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
_A : Any = TFRoFormerForMaskedLM(config=A__ )
_A : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_A : List[Any] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]:
_A : Optional[Any] = self.num_labels
_A : Tuple = TFRoFormerForSequenceClassification(config=A__ )
_A : Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_A : Tuple = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any:
_A : Dict = self.num_choices
_A : Any = TFRoFormerForMultipleChoice(config=A__ )
_A : List[Any] = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_A : List[Any] = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_A : Tuple = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_A : List[Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_A : Union[str, Any] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]:
_A : str = self.num_labels
_A : int = TFRoFormerForTokenClassification(config=A__ )
_A : Tuple = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_A : List[Any] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple:
_A : Optional[Any] = TFRoFormerForQuestionAnswering(config=A__ )
_A : Tuple = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_A : Tuple = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self ) -> List[str]:
_A : str = self.prepare_config_and_inputs()
        _A , _A , _A , _A , _A , _A , _A : Optional[Any] = config_and_inputs
_A : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowercase ( _lowerCamelCase,_lowerCamelCase,unittest.TestCase ):
_a = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
_a = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
_a = False
_a = False
def a__ ( self , _a , _a , _a , _a , _a ) -> str:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def a__ ( self ) -> Union[str, Any]:
_A : Dict = TFRoFormerModelTester(self )
_A : Any = ConfigTester(self , config_class=A__ , hidden_size=37 )
def a__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> List[Any]:
_A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def a__ ( self ) -> str:
_A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def a__ ( self ) -> int:
_A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A__ )
def a__ ( self ) -> Dict:
_A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__ )
def a__ ( self ) -> Union[str, Any]:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def a__ ( self ) -> Tuple:
_A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def a__ ( self ) -> str:
_A : List[Any] = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(A__ )
@require_tf
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
_A : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
_A : Optional[Any] = model(A__ )[0]
# TODO Replace vocab size
_A : Any = 5_0000
_A : Dict = [1, 6, vocab_size]
self.assertEqual(output.shape , A__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_A : Dict = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1e-4 )
@require_tf
class lowercase ( unittest.TestCase ):
_a = 1e-4
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = tf.constant([[4, 10]] )
_A : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_A : Any = emba(input_ids.shape )
_A : Union[str, Any] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
def a__ ( self ) -> List[Any]:
_A : int = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_A : Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
_A : Optional[Any] = emba.weight[:3, :5]
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
@require_tf
class lowercase ( unittest.TestCase ):
_a = 1e-4
def a__ ( self ) -> int:
        # query/key shape: (batch=2, heads=12, seq_len=16, head_dim=64)
_A : int = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
_A : int = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
_A : Optional[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_A : int = embed_positions([2, 16, 768] )[None, None, :, :]
_A , _A : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A__ , A__ , A__ )
_A : List[Any] = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_A : Any = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
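    # A rough numpy sketch of the rotary trick exercised above (illustration
    # only; the exact sin/cos interleaving inside TFRoFormerSelfAttention may
    # differ):
    #
    #     import numpy as np
    #     def rotate_half(x):
    #         x1, x2 = x[..., 0::2], x[..., 1::2]
    #         return np.stack([-x2, x1], axis=-1).reshape(x.shape)
    #     # query = query * cos_pos + rotate_half(query) * sin_pos
    #     # key   = key   * cos_pos + rotate_half(key)   * sin_pos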
import operator
def strand_sort( arr,reverse = False,solution = None ):
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item,sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item,xx ):
                    solution.insert(i,item )
                    break
            else:
                solution.append(item )
    strand_sort(arr,reverse,solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
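# Trace of the strand peeling for [4, 3, 5, 1, 2] (ascending):
#   strand 1: [4, 5] -> solution [4, 5]
#   strand 2: [3]    -> solution [3, 4, 5]
#   strand 3: [1, 2] -> solution [1, 2, 3, 4, 5]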
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
_snake_case = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class lowercase ( _snake_case ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_INIT_CONFIGURATION
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = SqueezeBertTokenizer
def __init__( self , _a=None , _a=None , _a=True , _a="[UNK]" , _a="[SEP]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , _a=True , _a=None , **_a , ) -> List[str]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
_A : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
_A : Any = getattr(lowerCAmelCase__ , normalizer_state.pop("""type""" ) )
_A : Optional[int] = do_lower_case
_A : Dict = strip_accents
_A : Any = tokenize_chinese_chars
_A : str = normalizer_class(**lowerCAmelCase__ )
_A : Tuple = do_lower_case
    def a__ ( self , token_ids_a , token_ids_b=None ) -> List[Any]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def a__ ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def a__ ( self , _a , _a = None ) -> Tuple[str]:
_A : str = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
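# Sketch of the special-token layout produced by the two helpers above
# (standard BERT-style formatting):
#   single sequence: [CLS] A [SEP]           token_type_ids: 0 0 ... 0
#   sequence pair:   [CLS] A [SEP] B [SEP]   token_type_ids: 0 ... 0 1 ... 1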
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_MASKED_LM_MAPPING
_a = TF_MODEL_FOR_MASKED_LM_MAPPING
def a__ ( self ) -> Tuple:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a__ ( self ) -> Any:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
_A : Tuple = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
_A : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> str:
_A : Any = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_A : List[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
_A : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
_A : str = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def a__ ( self ) -> Union[str, Any]:
_A : int = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_A : Optional[Any] = pipe("""Paris is the [MASK] of France.""" )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(_a , _a )
@slow
@require_torch
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_a )
@slow
@require_tf
def a__ ( self ) -> Tuple:
_A : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_a )
def a__ ( self , _a ) -> Tuple:
_A : Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
_A : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_a ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
_A : Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_a ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def a__ ( self ) -> Tuple:
_A : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_A : str = None
_A : Union[str, Any] = None
self.run_pipeline_test(_a , [] )
@require_tf
def a__ ( self ) -> Union[str, Any]:
_A : Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_A : Any = None
_A : Dict = None
self.run_pipeline_test(_a , [] )
def a__ ( self , _a , _a , _a ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_A : Optional[Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Tuple = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a__ ( self , _a , _a ) -> Dict:
_A : Dict = fill_masker.tokenizer
_A : List[str] = fill_masker.model
_A : List[str] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
with self.assertRaises(_a ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_a ):
fill_masker("""This is""" )
self.run_test_top_k(_a , _a )
self.run_test_targets(_a , _a )
self.run_test_top_k_targets(_a , _a )
self.fill_mask_with_duplicate_targets_and_top_k(_a , _a )
self.fill_mask_with_multiple_masks(_a , _a )
def a__ ( self , _a , _a ) -> List[str]:
_A : int = tokenizer.get_vocab()
_A : str = sorted(vocab.keys() )[:2]
# Pipeline argument
_A : Tuple = FillMaskPipeline(model=_a , tokenizer=_a , targets=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Call argument
_A : str = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : int = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _a )
_A : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_a ) )
# Score equivalence
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Optional[int] = [top_mask["""token_str"""] for top_mask in outputs]
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ) == set(_a ):
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_a )
_A : Union[str, Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
# Raises with invalid
with self.assertRaises(_a ):
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary, so the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_a ):
_A : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(_a ):
_A : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : str = FillMaskPipeline(model=_a , tokenizer=_a , top_k=2 )
_A : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
_A : Union[str, Any] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
] , )
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> List[Any]:
_A : Union[str, Any] = tokenizer.get_vocab()
_A : int = FillMaskPipeline(model=_a , tokenizer=_a )
# top_k=2, ntargets=3
_A : List[str] = sorted(vocab.keys() )[:3]
_A : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_a )
# If we use the most probable targets and filter differently, we should still
# have the same results
_A : Any = [el["""token_str"""] for el in sorted(_a , key=lambda x : x["score"] , reverse=_a )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_a ).issubset(_a ):
_A : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_a )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_a ) , nested_simplify(_a ) )
def a__ ( self , _a , _a ) -> str:
_A : Optional[int] = FillMaskPipeline(model=_a , tokenizer=_a )
_A : List[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
_A : Optional[Any] = sorted(vocab.keys() )[:3]
_A : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_A : Union[str, Any] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_a , top_k=10 )
# The target list contains duplicates, so we can't output more
# results than there are unique targets
self.assertEqual(len(_a ) , 3 )
def a__ ( self , _a , _a ) -> Tuple:
_A : Any = FillMaskPipeline(model=_a , tokenizer=_a )
_A : Optional[Any] = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_a , [
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
[
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
{"""sequence""": ANY(_a ), """score""": ANY(_a ), """token""": ANY(_a ), """token_str""": ANY(_a )},
],
] , )
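# Illustrative usage of the pipeline exercised above (the model name and the
# top_k value are assumptions for this sketch, mirroring run_large_test):
# unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
# unmasker("My name is <mask>")
# # -> a list of two dicts, each with "sequence", "score", "token", "token_str"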
| 54
| 0
|
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase ( snake_case__ ):
def __init__( self ) -> Tuple:
self.test()
def a__ ( self ) -> str:
_A : Any = 0
_A : Dict = False
while not completed:
if counter == 1:
self.reset()
_A : Optional[int] = self.advance()
if not self.does_advance(UpperCAmelCase_ ):
raise Exception(
"""Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
_A , _A , _A : str = self.update(UpperCAmelCase_ )
counter += 1
if counter > 1_0000:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
@abstractmethod
def a__ ( self ) -> Optional[Any]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def a__ ( self , _a ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def a__ ( self , _a ) -> List[str]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def a__ ( self ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def a__ ( self ) -> List[str]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def a__ ( self , _a=False ) -> List[Any]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( snake_case__ ):
def __init__( self , _a ) -> str:
super(UpperCAmelCase_ , self ).__init__()
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or len(UpperCAmelCase_ ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
_A : Dict = token_ids
_A : Optional[int] = len(self.token_ids )
_A : List[str] = -1 # the index of the currently fulfilled step
_A : Tuple = False
def a__ ( self ) -> Dict:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def a__ ( self , _a ) -> Optional[int]:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def a__ ( self , _a ) -> List[Any]:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase_ )}''' )
_A : Any = False
_A : List[str] = False
_A : List[str] = False
if self.does_advance(UpperCAmelCase_ ):
self.fulfilled_idx += 1
_A : Dict = True
if self.fulfilled_idx == (self.seqlen - 1):
_A : Any = True
_A : Any = completed
else:
# failed to make progress.
_A : str = True
self.reset()
return stepped, completed, reset
def a__ ( self ) -> Optional[Any]:
_A : Dict = False
_A : Optional[Any] = 0
def a__ ( self ) -> Dict:
return self.seqlen - (self.fulfilled_idx + 1)
def a__ ( self , _a=False ) -> List[str]:
_A : Optional[Any] = PhrasalConstraint(self.token_ids )
if stateful:
_A : List[Any] = self.seqlen
_A : int = self.fulfilled_idx
_A : Any = self.completed
return new_constraint
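# Sketch of how a phrasal constraint is typically driven (token ids made up):
# constraint = PhrasalConstraint([5, 9, 2])
# constraint.update(5)  # stepped=True, fulfilled_idx -> 0
# constraint.update(9)  # stepped=True, fulfilled_idx -> 1
# constraint.update(2)  # completed=True
# Feeding a token for which does_advance() is False resets the constraint to
# its initial state instead of advancing it.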
class lowercase :
def __init__( self , _a , _a=True ) -> Union[str, Any]:
_A : Any = max([len(UpperCAmelCase_ ) for one in nested_token_ids] )
_A : Optional[Any] = {}
for token_ids in nested_token_ids:
_A : List[str] = root
for tidx, token_id in enumerate(UpperCAmelCase_ ):
if token_id not in level:
_A : Optional[int] = {}
_A : Optional[Any] = level[token_id]
if no_subsets and self.has_subsets(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError(
"""Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
F''' {nested_token_ids}.''' )
_A : Any = root
def a__ ( self , _a ) -> Optional[Any]:
_A : int = self.trie
for current_token in current_seq:
_A : Dict = start[current_token]
_A : str = list(start.keys() )
return next_tokens
def a__ ( self , _a ) -> Optional[int]:
_A : Optional[Any] = self.next_tokens(UpperCAmelCase_ )
return len(UpperCAmelCase_ ) == 0
def a__ ( self , _a ) -> List[str]:
_A : List[Any] = list(root.values() )
if len(UpperCAmelCase_ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCAmelCase_ ) for nn in next_nodes] )
def a__ ( self , _a , _a ) -> List[Any]:
_A : List[str] = self.count_leaves(UpperCAmelCase_ )
return len(UpperCAmelCase_ ) != leaf_count
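# Sketch of the trie behaviour (hypothetical token ids):
# trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
# trie.next_tokens([])          # -> [1]
# trie.next_tokens([1, 2])      # -> [3, 4]
# trie.reached_leaf([1, 2, 3])  # -> True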
class lowercase ( snake_case__ ):
def __init__( self , _a ) -> List[str]:
super(UpperCAmelCase_ , self ).__init__()
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or len(UpperCAmelCase_ ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
_A : List[Any] = DisjunctiveTrie(UpperCAmelCase_ )
_A : Union[str, Any] = nested_token_ids
_A : int = self.trie.max_height
_A : List[Any] = []
_A : Optional[int] = False
def a__ ( self ) -> List[Any]:
_A : Dict = self.trie.next_tokens(self.current_seq )
if len(UpperCAmelCase_ ) == 0:
return None
else:
return token_list
def a__ ( self , _a ) -> int:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase_ )}''' )
_A : Tuple = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def a__ ( self , _a ) -> Dict:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase_ )}''' )
_A : str = False
_A : str = False
_A : Optional[int] = False
if self.does_advance(UpperCAmelCase_ ):
self.current_seq.append(UpperCAmelCase_ )
_A : List[str] = True
else:
_A : Optional[int] = True
self.reset()
_A : Optional[Any] = self.trie.reached_leaf(self.current_seq )
_A : Any = completed
return stepped, completed, reset
def a__ ( self ) -> Dict:
_A : Optional[int] = False
_A : List[Any] = []
def a__ ( self ) -> int:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def a__ ( self , _a=False ) -> List[Any]:
_A : Tuple = DisjunctiveConstraint(self.token_ids )
if stateful:
_A : Optional[int] = self.seqlen
_A : List[Any] = self.current_seq
_A : List[str] = self.completed
return new_constraint
class lowercase :
def __init__( self , _a ) -> Any:
_A : int = constraints
# max # of steps required to fulfill a given constraint
_A : Optional[Any] = max([c.seqlen for c in constraints] )
_A : Optional[int] = len(UpperCAmelCase_ )
_A : List[str] = False
self.init_state()
def a__ ( self ) -> Dict:
_A : int = []
_A : List[str] = None
_A : Tuple = [constraint.copy(stateful=UpperCAmelCase_ ) for constraint in self.constraints]
def a__ ( self ) -> List[Any]:
_A : Dict = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def a__ ( self ) -> List[Any]:
_A : str = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_A : str = constraint.advance()
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
token_list.append(UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
token_list.extend(UpperCAmelCase_ )
else:
_A : str = self.inprogress_constraint.advance()
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
token_list.append(UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
token_list.extend(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) == 0:
return None
else:
return token_list
def a__ ( self , _a ) -> Dict:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_A , _A : Any = self.add(UpperCAmelCase_ )
# the entire list of constraints is fulfilled
if self.completed:
break
def a__ ( self , _a ) -> List[Any]:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
_A , _A : Any = False, False
if self.completed:
_A : int = True
_A : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
# job, simply update the state
_A , _A , _A : Union[str, Any] = self.inprogress_constraint.update(UpperCAmelCase_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase_ ) )
_A : Any = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_A : str = None
if len(self.pending_constraints ) == 0:
# we're done!
_A : List[str] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our pending
# constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCAmelCase_ ):
_A , _A , _A : Dict = pending_constraint.update(UpperCAmelCase_ )
if not stepped:
raise Exception(
"""`constraint.update(token_id)` is not yielding incremental progress, """
"""even though `constraint.does_advance(token_id)` is true.""" )
if complete:
self.complete_constraints.append(UpperCAmelCase_ )
_A : Optional[Any] = None
if not complete and stepped:
_A : int = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_A : Any = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_A : Optional[int] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def a__ ( self , _a=True ) -> Optional[int]:
_A : Union[str, Any] = ConstraintListState(self.constraints ) # we never mutate the self.constraints objects
# throughout this process, so they are still in their initialization state.
if stateful:
_A : Optional[Any] = [
constraint.copy(stateful=UpperCAmelCase_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_A : Any = self.inprogress_constraint.copy(stateful=UpperCAmelCase_ )
_A : Any = [constraint.copy() for constraint in self.pending_constraints]
return new_state
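# Rough sketch of the intended driving loop (constraint objects and token
# ids below are illustrative):
# state = ConstraintListState([PhrasalConstraint([5, 9])])
# for token_id in generated_ids:
#     complete, stepped = state.add(token_id)
#     if complete:
#         break
# state.advance() then returns the token ids that would make progress next.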
| 702
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = CLIPTokenizer
_a = CLIPTokenizerFast
_a = True
_a = {}
_a = False
def a__ ( self ) -> Optional[Any]:
super().setUp()
# fmt: off
_A : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_A : str = dict(zip(_a , range(len(_a ) ) ) )
_A : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_A : str = {"""unk_token""": """<unk>"""}
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def a__ ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , **_a ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def a__ ( self , _a ) -> str:
_A : Tuple = """lower newer"""
_A : Optional[Any] = """lower newer"""
return input_text, output_text
def a__ ( self ) -> List[Any]:
_A : Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A : str = """lower newer"""
_A : List[str] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_A : int = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_A : str = tokens + [tokenizer.unk_token]
_A : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def a__ ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : Dict = self.tokenizer_class.from_pretrained(_a , **_a )
_A : str = self.rust_tokenizer_class.from_pretrained(_a , **_a )
_A : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_A : Tuple = """xa\u0303y""" + """ """ + """x\xe3y"""
_A : Dict = tokenizer_s.tokenize(_a )
_A : Dict = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
_A : Any = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_A : Optional[int] = tokenizer_s.tokenize(_a )
_A : List[Any] = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
_A : int = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_A : Optional[Any] = tokenizer_s.tokenize(_a )
_A : Tuple = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def a__ ( self ) -> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_A : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_A : str = F'''{text_of_1_token} {text_of_1_token}'''
_A : str = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Dict = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
_A : Any = F''' {text}'''
_A : List[Any] = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
_A : Optional[int] = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def a__ ( self ) -> List[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def a__ ( self ) -> str:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Union[str, Any]:
# CLIP always lower cases letters
pass
| 54
| 0
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_snake_case = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class lowercase :
_a = 42
_a = None
_a = None
_a = None
_a = None
def a__ ( self ) -> List[Any]:
_A , _A , _A : Tuple = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> Optional[Any]:
return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def a__ ( self ) -> str:
return self.major, self.minor, self.patch
def a__ ( self , _a ) -> Optional[Any]:
if isinstance(A_ , A_ ):
return Version(A_ )
elif isinstance(A_ , A_ ):
return other
raise TypeError(F'''{other} (type {type(A_ )}) cannot be compared to version.''' )
def __eq__( self , _a ) -> int:
try:
_A : List[str] = self._validate_operand(A_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , _a ) -> Any:
_A : Any = self._validate_operand(A_ )
return self.tuple < other.tuple
def __hash__( self ) -> List[Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def a__ ( cls , _a ) -> Optional[Any]:
_A : Dict = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def a__ ( self ) -> str:
return self.version_str
def lowerCAmelCase_ ( snake_case_ ):
_A : List[str] = _VERSION_REG.match(snake_case_ )
if not res:
raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(snake_case_ ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def lowerCAmelCase_ ( snake_case_ ):
return ".".join(str(snake_case_ ) for v in version_tuple )
| 703
|
from datetime import datetime as dt
import os
from github import Github
_snake_case = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def lowerCAmelCase_ ( ):
_A : int = Github(os.environ["""GITHUB_TOKEN"""] )
_A : Tuple = g.get_repo("""huggingface/transformers""" )
_A : Dict = repo.get_issues(state="""open""" )
for issue in open_issues:
_A : Optional[Any] = sorted([comment for comment in issue.get_comments()],key=lambda i : i.created_at,reverse=snake_case_ )
_A : Dict = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 54
| 0
|
import math
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if (
not isinstance(_lowercase,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if (
not isinstance(_lowercase,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
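# Worked examples (assuming, per the standard relationships, that the first
# function computes real power P = S * pf and the second reactive power
# Q = S * sqrt(1 - pf**2)):
# real power for S = 100 VA, pf = 0.9     -> 100 * 0.9 = 90.0 W
# reactive power for S = 100 VA, pf = 0.9 -> 100 * sqrt(1 - 0.81) ≈ 43.59 VAR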
| 704
|
from __future__ import annotations
class lowercase :
def __init__( self , _a = 0 ) -> str:
_A : Any = key
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a ) -> list[str]:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_a ) ^ key ) for ch in content]
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> str:
assert isinstance(_a , _a ) and isinstance(_a , _a )
_A : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_A : List[str] = """"""
for ch in content:
ans += chr(ord(_a ) ^ key )
return ans
def a__ ( self , _a , _a = 0 ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_a , _a ) )
except OSError:
return False
return True
def a__ ( self , _a , _a ) -> bool:
assert isinstance(_a , _a ) and isinstance(_a , _a )
try:
with open(_a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_a , _a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 54
| 0
|
import argparse
import os
import re
import packaging.version
_snake_case = "examples/"
_snake_case = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
_snake_case = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
_snake_case = "README.md"
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
with open(_A,"""r""",encoding="""utf-8""",newline="""\n""" ) as f:
_A : Tuple = f.read()
_A : Optional[Any] = REPLACE_PATTERNS[pattern]
_A : Optional[Any] = replace.replace("""VERSION""",_A )
_A : Optional[int] = re_pattern.sub(_A,_A )
with open(_A,"""w""",encoding="""utf-8""",newline="""\n""" ) as f:
f.write(_A )
def lowerCAmelCase_ ( snake_case_ ):
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_A,_A ),_A,pattern="""examples""" )
def lowerCAmelCase_ ( snake_case_,snake_case_=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A,_A,_A )
if not patch:
update_version_in_examples(_A )
def lowerCAmelCase_ ( ):
_A : List[Any] = "🤗 Transformers currently provides the following architectures"
_A : Optional[int] = "1. Want to contribute a new model?"
with open(_A,"""r""",encoding="""utf-8""",newline="""\n""" ) as f:
_A : Any = f.readlines()
# Find the start of the list.
_A : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_A : Union[str, Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_A : int = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""","""https://huggingface.co/docs/diffusers/model_doc""",)
index += 1
with open(_A,"""w""",encoding="""utf-8""",newline="""\n""" ) as f:
f.writelines(_A )
def lowerCAmelCase_ ( ):
with open(REPLACE_FILES["""init"""],"""r""" ) as f:
_A : List[str] = f.read()
_A : List[Any] = REPLACE_PATTERNS["init"][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def lowerCAmelCase_ ( snake_case_=False ):
_A : Optional[int] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_A : List[str] = default_version.base_version
elif patch:
_A : Any = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
_A : Any = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
_A : str = input(f'''Which version are you releasing? [{default_version}]''' )
if len(_A ) == 0:
_A : str = default_version
print(f'''Updating version to {version}.''' )
global_version_update(_A,patch=_A )
def lowerCAmelCase_ ( ):
_A : Union[str, Any] = get_version()
_A : Tuple = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
_A : List[str] = current_version.base_version
# Check with the user we got that right.
_A : Tuple = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(_A ) == 0:
_A : int = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(_A )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
_snake_case = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
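# Quick illustration of the "init" pattern above (a standalone sketch, not
# part of the release flow):
# pattern, repl = REPLACE_PATTERNS["init"]
# pattern.sub(repl.replace("VERSION", "1.0.0"), '__version__ = "0.9.0.dev0"\n')
# # -> '__version__ = "1.0.0"\n'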
| 705
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_snake_case = random.Random()
def lowerCAmelCase_ ( snake_case_,snake_case_=1.0,snake_case_=None,snake_case_=None ):
if rng is None:
_A : str = global_rng
_A : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=400 , _a=2000 , _a=10 , _a=160 , _a=8 , _a=0.0 , _a=4000 , _a=False , _a=True , ) -> Optional[int]:
_A : Any = parent
_A : List[Any] = batch_size
_A : List[Any] = min_seq_length
_A : Dict = max_seq_length
_A : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_A : Tuple = padding_value
_A : Tuple = sampling_rate
_A : str = return_attention_mask
_A : Any = do_normalize
_A : Union[str, Any] = feature_size
_A : List[Any] = chunk_length
_A : List[Any] = hop_length
def a__ ( self ) -> List[str]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self , _a=False , _a=False ) -> List[str]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_A : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_A : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_A : Any = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = WhisperFeatureExtractor if is_speech_available() else None
def a__ ( self ) -> Tuple:
_A : Optional[int] = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> Optional[Any]:
_A : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[str] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_A : Optional[int] = self.feature_extraction_class.from_pretrained(_a )
_A : Tuple = feat_extract_first.to_dict()
_A : List[Any] = feat_extract_second.to_dict()
_A : List[Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Dict:
_A : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Dict = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
_A : Optional[int] = self.feature_extraction_class.from_json_file(_a )
_A : str = feat_extract_first.to_dict()
_A : Any = feat_extract_second.to_dict()
_A : Union[str, Any] = feat_extract_first.mel_filters
_A : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def a__ ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
_A : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_A : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_A : Any = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_A : Dict = feature_extractor(_a , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_A : List[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
_A : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : Tuple = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_A : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_A : Any = np.asarray(_a )
_A : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : int = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_A : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs]
_A : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
_A : Union[str, Any] = [np.asarray(_a ) for speech_input in speech_inputs_truncated]
_A : Optional[int] = feature_extractor(_a , return_tensors="""np""" ).input_features
_A : List[Any] = feature_extractor(_a , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def a__ ( self ) -> Dict:
import torch
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
_A : str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_A : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_A : Optional[int] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ ( self , _a ) -> Dict:
_A : Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_A : Optional[Any] = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def a__ ( self ) -> Tuple:
# fmt: off
_A : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_A : Dict = self._load_datasamples(1 )
_A : Optional[Any] = WhisperFeatureExtractor()
_A : Optional[Any] = feature_extractor(_a , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _a , atol=1e-4 ) )
def a__ ( self ) -> str:
_A : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_A : str = self._load_datasamples(1 )[0]
_A : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_A : List[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_a )[0]
self.assertTrue(np.all(np.mean(_a ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a ) - 1 ) < 1e-3 ) )
| 54
| 0
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
_snake_case = logging.getLogger(__name__)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30522, type=int)
_snake_case = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, "rb") as fp:
_snake_case = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
_snake_case = Counter()
for tk_ids in data:
counter.update(tk_ids)
_snake_case = [0] * args.vocab_size
for k, v in counter.items():
_snake_case = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 706
|
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( snake_case_ ):
return data[1:] + data[0]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Dict = """"""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : int = int("""0b""" + data[0] + data[-1],2 )
_A : Any = int("""0b""" + data[1:3],2 )
return bin(s[row][col] )[2:]
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ):
_A : List[str] = message[:4]
_A : List[Any] = message[4:]
_A : Union[str, Any] = apply_table(snake_case_,snake_case_ )
_A : List[Any] = xor(snake_case_,snake_case_ )
_A : Optional[Any] = apply_sbox(snake_case_,temp[:4] ) # noqa: E741
_A : List[Any] = apply_sbox(snake_case_,temp[4:] )
_A : int = """0""" * (2 - len(snake_case_ )) + l # noqa: E741
_A : Union[str, Any] = """0""" * (2 - len(snake_case_ )) + r
_A : List[Any] = apply_table(l + r,snake_case_ )
_A : Any = xor(snake_case_,snake_case_ )
return temp + right
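# The function above is one Feistel round: the 8-bit block is split into
# left/right halves, the right half is expanded and XORed with the round
# key, the result is pushed through the two S-boxes, permuted, and XORed
# into the left half, while the right half passes through unchanged.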
if __name__ == "__main__":
_snake_case = input("Enter 10 bit key: ")
_snake_case = input("Enter 8 bit message: ")
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 54
| 0
|
from __future__ import annotations
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_A : Union[str, Any] = get_failure_array(snake_case_ )
# 2) Step through text searching for pattern
_A , _A : Dict = 0, 0 # index into text, pattern
while i < len(snake_case_ ):
if pattern[j] == text[i]:
if j == (len(snake_case_ ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_A : Optional[Any] = failure[j - 1]
continue
i += 1
return False
def lowerCAmelCase_ ( snake_case_ ):
_A : int = [0]
_A : List[Any] = 0
_A : Union[str, Any] = 1
while j < len(snake_case_ ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_A : Dict = failure[i - 1]
continue
j += 1
failure.append(snake_case_ )
return failure
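# Note: failure[i] is the length of the longest proper prefix of
# pattern[: i + 1] that is also a suffix of it; for "aabaabaaa" this yields
# [0, 1, 0, 1, 2, 3, 4, 5, 2], exactly what Test 5 below asserts.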
if __name__ == "__main__":
# Test 1)
_snake_case = "abc1abc12"
_snake_case = "alskfjaldsabc1abc1abc12k23adsfabcabc"
_snake_case = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
_snake_case = "ABABX"
_snake_case = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
_snake_case = "AAAB"
_snake_case = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
_snake_case = "abcdabcy"
_snake_case = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
_snake_case = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 707
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 54
| 0
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_snake_case = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def lowerCAmelCase_ ( snake_case_ ):
for pegasus_name, hf_name in PATTERNS:
_A : Optional[int] = k.replace(__A,__A )
return k
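# For example (a hypothetical TF key), applying PATTERNS in order maps
# "encoder/memory_attention/output_proj/kernel"
# to "encoder.encoder_attn.out_proj.weight".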
def convert_pegasus(tf_weights: dict , cfg_updates: dict ) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping["""shared.weight"""][cfg.pad_token_id] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
    mapping["""encoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    mapping["""decoder.embed_tokens.weight"""] = mapping["""shared.weight"""]
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model
def lowerCAmelCase_ ( snake_case_="./ckpt/aeslc/model.ckpt-32000" ):
_A : Dict = tf.train.list_variables(__A )
_A : str = {}
_A : Union[str, Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(__A,desc="""converting tf checkpoint to dict""" ):
_A : int = any(pat in name for pat in ignore_name )
if skip_key:
continue
_A : List[Any] = tf.train.load_variable(__A,__A )
_A : Tuple = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path , save_dir ):
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f'''summarization_{dataset}''']['''max_position_embeddings''']
    tok = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates["""task_specific_params"""] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop("""model.decoder.embed_positions.weight""" )
    sd.pop("""model.encoder.embed_positions.weight""" )
    torch.save(sd , Path(save_dir ) / """pytorch_model.bin""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
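# Example invocation (a sketch; the script name and checkpoint path are placeholders):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc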
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class lowercase ( Trainer ):
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F''' {self.model.__class__}'''
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
                """ padding.""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                F''' padded to `max_length`={max_length}''' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
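# A minimal sketch of what `label_smoothed_nll_loss` (dynamically imported above) is
# assumed to compute -- not necessarily the exact upstream implementation: mix the NLL
# of the gold token with the summed NLL over the vocabulary, mask out pad positions,
# and return both the smoothed loss and the raw NLL, matching how `_compute_loss`
# unpacks two values.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.sum(dim=-1)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss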
from __future__ import annotations
def lowerCAmelCase_ ( stress , tangential_force , area , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
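# Worked example (tau = F / A): with stress=0, tangential_force=100 and area=20 the
# function returns ("stress", 5.0); with tangential_force=0, stress=25 and area=20
# it returns ("tangential_force", 500).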
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations ):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x , y ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''' )
    print(f'''The known value of pi is {pi}''' )
    print(f'''The total error is {abs(pi - pi_estimate )}''' )
def area_under_curve_estimator(iterations , function_to_integrate , min_value = 0.0 , max_value = 1.0 , ):
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations , min_value = 0.0 , max_value = 1.0 ):
    def identity_function(x ) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("""******************""" )
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
    print(f'''Estimated value is {estimated_value}''' )
    print(f'''Expected value is {expected_value}''' )
    print(f'''Total error is {abs(estimated_value - expected_value )}''' )
    print("""******************""" )
def pi_estimator_using_area_under_curve(iterations ):
    def function_to_integrate(x ) -> float:
        return sqrt(4.0 - x * x )
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print("""******************""" )
    print("""Estimating pi using area_under_curve_estimator""" )
    print(f'''Estimated value is {estimated_value}''' )
    print(f'''Expected value is {pi}''' )
    print(f'''Total error is {abs(estimated_value - pi )}''' )
    print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
class SelfOrganizingMap :
    def get_winner( self , weights , sample ):
        # Compute the winning weight vector as the one with the smaller
        # squared Euclidean distance to the sample.
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if d0 < d1 else 1
    def update( self , weights , sample , j , alpha ):
        # Competitive ("winner-take-all") learning rule: move the winning vector
        # a fraction alpha toward the sample, w_j <- w_j + alpha * (x - w_j).
        for i in range(len(weights[j] ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main():
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(f'''Clusters that the test sample belongs to : {winner}''' )
    print(f'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowercase ( TestCase ):
    def setUp( self ) -> int:
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
        vocab_tokens = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        bart_tokenizer_path = os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
    def tearDown( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a__ ( self ) -> str:
        save_dir = os.path.join(self.tmpdirname , """rag_tokenizer""" )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a__ ( self ) -> str:
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
def a__ ( self ) -> Dict:
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
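    # Optional sanity check (a sketch, assuming the standard fuzzy semantics above):
    # De Morgan's law relates union and intersection, max(a, b) == 1 - min(1 - a, 1 - b), e.g.
    #   assert np.allclose(union, 1 - fuzz.fuzzy_and(X, 1 - young, X, 1 - middle_aged)[1])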
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class lowercase ( unittest.TestCase ):
    def setUp( self ) -> Union[str, Any]:
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
    def tearDown( self ) -> Optional[int]:
_A : List[str] = """src/transformers"""
shutil.rmtree(self.transformer_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ) -> Optional[Any]:
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.transformer_dir , """new_code.py""" )
        with open(fname , """w""" , newline="""\n""" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , """r""" ) as f:
                self.assertTrue(f.read() , expected )
def a__ ( self ) -> str:
        code = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
        self.assertEqual(code , REFERENCE_CODE )
def a__ ( self ) -> int:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _a , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _a ) , )
# Copy consistency with a really long name
_A : List[str] = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , _a , _a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _a , overwrite_result=re.sub("""Bert""" , """TestModel""" , _a ) , )
def a__ ( self ) -> Tuple:
        localized_readme = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme["""format_model_list"""] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list , converted_md_list_sample )
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_md_list , localized_readme["""format_model_list"""] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme["""format_model_list"""] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample )
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase ( SchedulerCommonTest ):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ) -> Any:
        config = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
        config.update(**kwargs )
return config
def a__ ( self ) -> Optional[int]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def a__ ( self ) -> Union[str, Any]:
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def a__ ( self ) -> List[Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def a__ ( self ) -> Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
    def a__ ( self ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def a__ ( self ) -> int:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
    def a__ ( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    def a__ ( self ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
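# For reference, a sketch of the update these tests exercise (not the library's
# exact code): with sigma_t taken from the noise schedule, the Euler discrete step is
#   derivative = (sample - pred_original_sample) / sigma_t
#   prev_sample = sample + derivative * (sigma_{t+1} - sigma_t)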
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
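# e.g. floats_list((2, 3)) returns a 2x3 nested list of random floats in [0, scale),
# one inner list per batch item.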
class TvltFeatureExtractionTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=4_4100 , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ) -> Tuple:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Optional[int]:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self ) -> Any:
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
def a__ ( self ) -> List[Any]:
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , """spectrogram_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """feature_size""" ) )
        self.assertTrue(hasattr(feature_extractor , """num_audio_channels""" ) )
        self.assertTrue(hasattr(feature_extractor , """hop_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """chunk_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """sampling_rate""" ) )
def a__ ( self ) -> Optional[int]:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("""mel_filters""" )
        mel_second = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
def a__ ( self ) -> int:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("""mel_filters""" )
        mel_second = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
def a__ ( self ) -> Optional[Any]:
# Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self , num_samples ) -> str:
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def a__ ( self ) -> Optional[Any]:
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig ( PretrainedConfig ):
    model_type = 'poolformer'
    def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 128, 320, 512] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1e-5 , initializer_range=0.02 , **kwargs , ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig ( OnnxConfig ):
_a = version.parse("1.11" )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ) -> float:
        return 2e-3
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig ( PretrainedConfig ):
    model_type = "owlvit_text_model"
    def __init__( self , vocab_size=4_9408 , hidden_size=512 , intermediate_size=2048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=4_9406 , eos_token_id=4_9407 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig ( PretrainedConfig ):
    model_type = "owlvit_vision_model"
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig ( PretrainedConfig ):
    model_type = "owlvit"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , return_dict=True , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
@classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        config_dict = {}
        config_dict["""text_config"""] = text_config
        config_dict["""vision_config"""] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class OwlViTOnnxConfig ( OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1e-4
    def generate_dummy_inputs( self , processor , batch_size = -1 , seq_length = -1 , framework = None , ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}
@property
    def default_onnx_opset( self ) -> int:
return 14
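# Example (a sketch): a combined config can be assembled from the two sub-config
# dicts via the classmethod above, e.g.
#   config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)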
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22Img2ImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
    def text_embedder_hidden_size( self ) -> int:
return 32
@property
    def time_input_dim( self ) -> Union[str, Any]:
return 32
@property
    def block_out_channels_a( self ) -> List[str]:
return self.time_input_dim
@property
    def time_embed_dim( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ) -> str:
return 100
@property
    def dummy_unet( self ) -> Tuple:
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ) -> Tuple:
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ) -> int:
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ) -> str:
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((256, 256) )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": init_image,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 10,
            """guidance_scale""": 7.0,
            """strength""": 0.2,
            """output_type""": """np""",
        }
        return inputs
    def a__ ( self ) -> Union[str, Any]:
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    def tearDown( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
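# For reference, a minimal sketch of the kind of check assert_mean_pixel_difference
# performs. This is an assumption about the helper, not diffusers' actual
# implementation, which may differ in normalization and threshold handling.
import numpy as np

def assert_mean_pixel_difference_sketch(image, expected_image, expected_max_diff=10):
    # compare mean absolute per-pixel error between the generated and reference image
    image = np.asarray(image, dtype=np.float64)
    expected_image = np.asarray(expected_image, dtype=np.float64)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < expected_max_diff, f"images differ by {avg_diff} pixels on average"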
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
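# The _LazyModule indirection above defers importing the torch-dependent
# submodule until one of its symbols is first accessed. A minimal sketch of the
# same idea using PEP 562's module-level __getattr__ (illustrative; not the
# actual _LazyModule implementation):
import importlib

_lazy_structure = {
    "configuration_trajectory_transformer": ["TrajectoryTransformerConfig"],
    "modeling_trajectory_transformer": ["TrajectoryTransformerModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _lazy_structure.items() for attr in attrs}

def __getattr__(name):
    # resolve the owning submodule lazily, only when a symbol is first requested
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")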
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit

    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count
if __name__ == "__main__":
print(f"""{solution() = }""")
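# The sieve above rests on this identity: for x = a + d, y = a, z = a - d in
# arithmetic progression, x**2 - y**2 - z**2 = a * (4*d - a), so n has one
# representation per integer pair (a, d) with d < a < 4*d. A brute-force
# cross-check over that identity (illustrative helper, not part of the solution):
def brute_force_counts(limit: int = 1000) -> list[int]:
    counts = [0] * limit
    for a in range(1, limit):  # n = a * (4*d - a) >= a, so a < limit suffices
        for d in range(a // 4 + 1, a):  # enforces a < 4*d and d < a
            n = a * (4 * d - a)
            if n < limit:
                counts[n] += 1
    return counts

assert sum(1 for x in brute_force_counts(1000)[1:] if x == 10) == solution(999)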
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
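# Usage sketch. The kwargs below (models, batch_sizes, sequence_lengths) are
# assumed to be inherited BenchmarkArguments fields and may differ by version;
# on a CPU-only machine the class selects a OneDeviceStrategy on /cpu:0.
args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8]
)
print(args.is_tpu, args.n_gpu)  # e.g. False 0 without a TPU or GPU
with args.strategy.scope():
    pass  # build and run the benchmarked model under the selected strategy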
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
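# Usage sketch, assuming the layer above is transformers' TFGPT2Tokenizer. It is
# built via from_tokenizer so the padding kwargs unambiguously reach __init__;
# with max_length set, every batch comes back padded/truncated to that length.
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2Tokenizer

hf_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tf_tokenizer = TFGPT2Tokenizer.from_tokenizer(
    hf_tokenizer, max_length=16, pad_token_id=hf_tokenizer.eos_token_id
)
batch = tf_tokenizer(tf.constant(["hello world", "a somewhat longer example sentence"]))
print(batch["input_ids"].shape, batch["attention_mask"].shape)  # (2, 16) (2, 16)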
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
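# Round-trip usage sketch for the tokenizer above; the checkpoint name comes
# from the pretrained map at the top of the file.
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tokenizer("Crime and Punishment")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.decode(ids))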
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_pegasus_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_A : List[Any] = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_A,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
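# The id layout asserted above can be checked directly against the public
# checkpoint (quick sketch):
from transformers import PegasusTokenizer

pegasus_tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
print(pegasus_tok.pad_token_id, pegasus_tok.eos_token_id, pegasus_tok.offset)  # 0 1 103
print(pegasus_tok.convert_ids_to_tokens([0, 1, 2, 3]))  # ['<pad>', '</s>', '<mask_1>', '<mask_2>']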
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
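# Outside the test harness, the same checkpoint can be exercised end-to-end
# with the matching tokenizer (sketch; the protein sequence is an arbitrary example):
import tensorflow as tf
from transformers import EsmTokenizer, TFEsmForMaskedLM

esm_tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
esm_model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

inputs = esm_tokenizer("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", return_tensors="tf")
logits = esm_model(**inputs).logits  # (1, seq_len, 33): per-position amino-acid logits
print(logits.shape)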
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"{prefix}.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"{prefix}.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.query.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.key.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.value.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.output.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.output.LayerNorm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.intermediate.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.output.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.output.LayerNorm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["cls.predictions.decoder.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["cls.predictions.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"cls.predictions.transform.dense.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"cls.predictions.transform.LayerNorm.{w}"] = state_dict[
                f"cls.predictions.transform.LayerNorm.{w}"
            ]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
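# To sanity-check the result, the extracted state dict should load into a
# 6-layer student of the same architecture. A hedged sketch: the student
# config below is an assumption, not part of the extraction script.
import torch
from transformers import BertConfig, BertForMaskedLM

student_config = BertConfig.from_pretrained("bert-base-uncased", num_hidden_layers=6)
student = BertForMaskedLM(student_config)

compressed_sd = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth")
missing, unexpected = student.load_state_dict(compressed_sd, strict=False)
print(f"missing keys: {len(missing)}, unexpected keys: {len(unexpected)}")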
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
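# The from_list/from_dict equivalence the tests rely on, as a standalone snippet:
from datasets import Dataset

records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
dset = Dataset.from_list(records)
dset_from_dict = Dataset.from_dict({"col_1": [3, 2], "col_2": ["a", "b"]})

print(dset.column_names)                 # ['col_1', 'col_2']
print(dset[0])                           # {'col_1': 3, 'col_2': 'a'}
print(dset.info == dset_from_dict.info)  # True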
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)

        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
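# The PRK-then-PLMS structure exercised by full_loop, as a standalone denoising
# loop (sketch; a zero residual stands in for a real UNet call):
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:        # Runge-Kutta warm-up steps
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:       # linear multistep steps
    residual = torch.zeros_like(sample)
    sample = scheduler.step_plms(residual, t, sample).prev_sample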
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
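# Worked example (illustrative): the call sequence below mirrors main() for a
# hypothetical 3-variable function with minterms {1, 4, 6, 7}. Note the script
# reads minterms as floats, so each "bit" renders as e.g. "1.0"; the grouping
# logic only relies on position-wise equality, so the pipeline still runs.
example_binary = decimal_to_binary(3, [1.0, 4.0, 6.0, 7.0])
example_pi = check(example_binary)
example_chart = prime_implicant_chart(example_pi, example_binary)
print(selection(example_chart, example_pi))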