from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Read a pyspark.sql.DataFrame as a Dataset (or, when streaming, an IterableDataset)."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Reuse a cached dataset unless the caller explicitly opted out.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
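# Usage sketch (not part of the original module; the SparkSession setup and
# the cache path below are illustrative assumptions):
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/hf_cache").read()
#
# With the default streaming=True, read() returns an IterableDataset and no
# Arrow cache is materialized.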
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
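# Note on the pattern above: at import time only the `_import_structure` table
# of strings is built, and the `_LazyModule` installed into `sys.modules`
# resolves names such as `Pix2StructConfig` on first attribute access. Heavy
# optional dependencies (torch, vision) are therefore only imported when a
# symbol that needs them is actually used.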
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Any = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class A__ ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'swin2sr'
SCREAMING_SNAKE_CASE = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: List[str]=64 , _SCREAMING_SNAKE_CASE: Any=1 , _SCREAMING_SNAKE_CASE: Optional[Any]=3 , _SCREAMING_SNAKE_CASE: int=180 , _SCREAMING_SNAKE_CASE: str=[6, 6, 6, 6, 6, 6] , _SCREAMING_SNAKE_CASE: int=[6, 6, 6, 6, 6, 6] , _SCREAMING_SNAKE_CASE: Tuple=8 , _SCREAMING_SNAKE_CASE: List[str]=2.0 , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: str=0.0 , _SCREAMING_SNAKE_CASE: List[str]=0.0 , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: Dict=False , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: str=1e-5 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Any=1.0 , _SCREAMING_SNAKE_CASE: Any="1conv" , _SCREAMING_SNAKE_CASE: List[Any]="pixelshuffle" , **_SCREAMING_SNAKE_CASE: Dict , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase)
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = len(_UpperCAmelCase)
__lowerCAmelCase = num_heads
__lowerCAmelCase = window_size
__lowerCAmelCase = mlp_ratio
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = hidden_act
__lowerCAmelCase = use_absolute_embeddings
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = upscale
__lowerCAmelCase = img_range
__lowerCAmelCase = resi_connection
__lowerCAmelCase = upsampler
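# Usage sketch (illustrative): the defaults above reproduce the
# caidas/swin2sr-classicalsr-x2-64 architecture, and attribute_map lets
# generic code query the standard config names:
#
#     config = Swin2SRConfig(upscale=4)
#     assert config.hidden_size == 180        # mapped to embed_dim
#     assert config.num_hidden_layers == 6    # mapped to num_layers = len(depths)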
"""simple docstring"""
import string
from math import logaa
def _lowercase ( __snake_case ,__snake_case ) -> int:
__lowerCAmelCase : int = document.translate(
str.maketrans("" ,"" ,string.punctuation ) ).replace("\n" ,"" )
__lowerCAmelCase : Dict = document_without_punctuation.split(" " ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _lowercase ( __snake_case ,__snake_case ) -> tuple[int, int]:
__lowerCAmelCase : Optional[Any] = corpus.lower().translate(
str.maketrans("" ,"" ,string.punctuation ) ) # strip all punctuation and replace it with ''
__lowerCAmelCase : List[str] = corpus_without_punctuation.split("\n" )
__lowerCAmelCase : str = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__snake_case ))
def _lowercase ( __snake_case ,__snake_case ,__snake_case=False ) -> float:
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
return round(1 + logaa(n / (1 + df) ) ,3 )
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
return round(logaa(n / df ) ,3 )
def _lowercase ( __snake_case ,__snake_case ) -> float:
return round(tf * idf ,3 )
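# Worked example (a sketch, not part of the original module): three one-line
# documents, where "sat" occurs in two of them, so idf = log10(3 / 2) ~= 0.176.
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog sat\nthe bird flew"
    tf = term_frequency("sat", "the cat sat")  # -> 1
    df, n = document_frequency("sat", corpus)  # -> (2, 3)
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # -> 0.176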
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transpositions
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the worldometers landing page for the three headline counters."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    DFS over a binary state-space tree: at each index the element is either
    skipped or appended, so every subsequence is printed exactly once.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
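# Expected output for ["A", "B", "C"]: the "skip" branch is explored before
# the "take" branch, so the subsequences print in this order:
# [], ['C'], ['B'], ['B', 'C'], ['A'], ['A', 'C'], ['A', 'B'], ['A', 'B', 'C']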
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Sum all numbers in [1000, 1000000) equal to the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
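# Worked example: 4150 qualifies, since 4**5 + 1**5 + 5**5 + 0**5
# = 1024 + 1 + 3125 + 0 = 4150. solution() sums all such numbers
# (Project Euler 30; the answer is 443839).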
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function to each element in the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a gaussian kernel of the given dimension from pixel distances.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
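# Example invocation (paths and parameters are illustrative):
#     python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
# i.e. <image> <spatial variance> <intensity variance> <kernel size>; an even
# kernel size is bumped to the next odd number by parse_args.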
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """
    Produce an ordered, minimal list of slice tuples covering all elements
    between the (inclusive) multi-dimensional indices `start` and `end`.
    """

    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Run `layer` over `inputs` in chunks along the flattened batch dimensions,
    writing results into a pre-allocated output to bound peak memory.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
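# A minimal sketch of chunk_layer (illustrative shapes, not from the original
# file): run a module over a (4, 6) grid of 8-dim vectors, 5 flattened batch
# elements at a time.
#
#     lin = torch.nn.Linear(8, 8)
#     x = torch.randn(4, 6, 8)
#     y = chunk_layer(lin, {"input": x}, chunk_size=5, no_batch_dims=2)
#     assert y.shape == (4, 6, 8)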
class ChunkSizeTuner:
    """Binary-searches the largest chunk size that runs without a RuntimeError (e.g. OOM)."""

    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Stores the configuration of an MMBT model, wrapping the underlying transformer's config."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
a__ : Optional[Any] = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
a__ : List[Any] = cvtColor(img, COLOR_BGR2GRAY)
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = cn.convert_to_negative(__lowerCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def A__ ( ):
"""simple docstring"""
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__lowerCamelCase, 1_1_0 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = imread('digital_image_processing/image_data/lena_small.jpg', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_lowerCAmelCase = canny.canny(__lowerCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def A__ ( ):
"""simple docstring"""
assert gg.gaussian_filter(__lowerCamelCase, 5, sigma=0.9 ).all()
def A__ ( ):
"""simple docstring"""
# laplace diagonals
_lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_lowerCAmelCase = conv.img_convolve(__lowerCamelCase, __lowerCamelCase ).astype(__lowerCamelCase )
assert res.any()
def A__ ( ):
"""simple docstring"""
assert med.median_filter(__lowerCamelCase, 3 ).any()
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = sob.sobel_filter(__lowerCamelCase )
assert grad.any() and theta.any()
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = sp.make_sepia(__lowerCamelCase, 2_0 )
assert sepia.all()
def A__ ( __lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
"""simple docstring"""
_lowerCAmelCase = bs.Burkes(imread(__lowerCamelCase, 1 ), 1_2_0 )
burkes.process()
assert burkes.output_img.any()
def A__ ( __lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg", ):
"""simple docstring"""
_lowerCAmelCase = rs.NearestNeighbour(imread(__lowerCamelCase, 1 ), 4_0_0, 2_0_0 )
nn.process()
assert nn.output.any()
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
_lowerCAmelCase = imread(__lowerCamelCase, 0 )
# Test for get_neighbors_pixel function() return not None
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = image[x_coordinate][y_coordinate]
_lowerCAmelCase = lbp.get_neighbors_pixel(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
_lowerCAmelCase = lbp.local_binary_value(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
assert lbp_image.any()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    """Map every byte to a printable unicode character so BPE can operate on raw bytes."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word) -> set:
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
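
# Illustrative note (added, not part of the original module): `bytes_to_unicode`
# builds a reversible byte <-> printable-unicode map, and `get_pairs` enumerates
# the merge candidates BPE considers for one word. For example:
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}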
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
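
# Hypothetical usage sketch (added illustration, not from the original file):
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   tokenizer("Hello world")["input_ids"]
# returns the byte-level BPE ids wrapped in <s> ... </s>; the exact ids depend
# on the downloaded vocab.json / merges.txt.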
"""Numerical integration with the extended trapezoidal rule."""


def method_1(boundary: list, steps: float) -> float:
    """Extended trapezoidal rule: h/2 * (f(a) + 2*sum(f(x_i)) + f(b))."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    """Yield the interior sample points a+h, a+2h, ... strictly below b-h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
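
# Sanity check (added illustration): for f(x) = x**2 on [0, 1] the exact integral
# is 1/3, so with 10 steps the extended trapezoidal rule above should satisfy
#   abs(method_1([0.0, 1.0], 10.0) - 1 / 3) < 1e-2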
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape time of c = x + iy under z -> z**2 + c."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: points inside the set are black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Hue-based coloring: the escape time selects the HSV hue."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
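
# Added note: the loop above maps pixel (image_x, image_y) to the complex-plane
# point c = figure_x + i * figure_y; figure_height is derived from figure_width
# so the rendered figure keeps the image's aspect ratio.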
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
"""simple docstring"""
def snake_case_ ( A_ : int, A_ : int ):
'''simple docstring'''
return int(input_a == input_a == 0 )
def snake_case_ ( ):
'''simple docstring'''
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(F'''| 0 | 0 | {nor_gate(0, 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0, 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1, 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1, 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
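
# Added illustration: NOR is functionally complete; for instance a NOT gate can
# be expressed as nor_gate(x, x), since nor_gate(0, 0) == 1 and nor_gate(1, 1) == 0.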
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
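
# Hypothetical usage sketch (assumption, not part of the original module):
#   tool = TextClassificationTool()
#   tool("This movie was great", labels=["positive", "negative"])
# The decode step keeps only the entailment logit (column 2 of the MNLI head)
# per candidate label and returns the label with the highest score.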
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
__a : Optional[Any] = "pytorch_model.bin"
__a : Optional[Any] = "pytorch_model.bin.index.json"
__a : Tuple = "adapter_config.json"
__a : Union[str, Any] = "adapter_model.bin"
__a : List[Any] = "adapter_model.safetensors"
__a : List[Any] = "tf_model.h5"
__a : Union[str, Any] = "tf_model.h5.index.json"
__a : Optional[Any] = "model.ckpt"
__a : Union[str, Any] = "flax_model.msgpack"
__a : Optional[int] = "flax_model.msgpack.index.json"
__a : int = "model.safetensors"
__a : List[str] = "model.safetensors.index.json"
__a : int = "config.json"
__a : Dict = "preprocessor_config.json"
__a : Optional[Any] = FEATURE_EXTRACTOR_NAME
__a : List[str] = "generation_config.json"
__a : List[str] = "modelcard.json"
__a : Union[str, Any] = "▁"
__a : int = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
__a : Optional[Any] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
__a : List[str] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
__a : Tuple = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """Raise ImportError if the installed transformers is older than min_version."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
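
# Illustrative sketch (assumption about the caller, not from this module):
# Trainer.hyperparameter_search resolves its backend through this registry, roughly:
#   backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(default_hp_search_backend())]
#   backend_cls().ensure_available()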
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
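
# Added note: the _LazyModule indirection above defers the heavy torch imports
# until an attribute such as `LiltModel` is first accessed, while the
# TYPE_CHECKING branch keeps static analyzers aware of the real symbols.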
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
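
# Worked example (added illustration of the mass-action law n * p = n_i**2):
#   carrier_concentration(electron_conc=3.0e10, hole_conc=0, intrinsic_conc=1.5e10)
#   -> ("hole_conc", 7.5e9)   # since (1.5e10) ** 2 / 3.0e10 == 7.5e9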
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase="pt" ):
'''simple docstring'''
_lowerCAmelCase : str = {'add_prefix_space': True} if isinstance(_lowerCamelCase , _lowerCamelCase ) and not line.startswith(' ' ) else {}
_lowerCAmelCase : List[str] = padding_side
return tokenizer(
[line] , max_length=_lowerCamelCase , padding='max_length' if pad_to_max_length else None , truncation=_lowerCamelCase , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase , **_lowerCamelCase , )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
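
# Worked example (added illustration): f1_score("york city", "new york city")
# shares 2 tokens, so precision = 2/2 = 1.0, recall = 2/3, and
# F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.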
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
__magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__magic_name__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__magic_name__ = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
__magic_name__ = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__magic_name__ = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test batched
__magic_name__ = feat_extract(__lowerCamelCase , return_tensors="np" ).input_values
__magic_name__ = feat_extract(__lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def _snake_case ( self : List[Any] ) -> Dict:
__magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__magic_name__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__magic_name__ = ["longest", "max_length", "do_not_pad"]
__magic_name__ = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase ):
__magic_name__ = feat_extract(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors="np" )
__magic_name__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _snake_case ( self : int ) -> Optional[int]:
__magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__magic_name__ = range(8_0_0 , 1_4_0_0 , 2_0_0 )
__magic_name__ = [floats_list((1, x) )[0] for x in lengths]
__magic_name__ = ["longest", "max_length", "do_not_pad"]
__magic_name__ = [None, 1_6_0_0, None]
for max_length, padding in zip(__lowerCamelCase , __lowerCamelCase ):
__magic_name__ = feat_extract(__lowerCamelCase , max_length=__lowerCamelCase , padding=__lowerCamelCase )
__magic_name__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _snake_case ( self : Any ) -> Optional[Any]:
__magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__magic_name__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__magic_name__ = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0_0_0 , padding="max_length" , return_tensors="np" )
__magic_name__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _snake_case ( self : List[str] ) -> Optional[Any]:
__magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__magic_name__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__magic_name__ = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=1_0_0_0 , padding="longest" , return_tensors="np" )
__magic_name__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
__magic_name__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__magic_name__ = feat_extract(
__lowerCamelCase , truncation=__lowerCamelCase , max_length=2_0_0_0 , padding="longest" , return_tensors="np" )
__magic_name__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
def _snake_case ( self : Tuple ) -> List[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
__magic_name__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__magic_name__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__magic_name__ = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
__magic_name__ = feature_extractor(audio_target=__lowerCamelCase , padding=__lowerCamelCase , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
__magic_name__ = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
__magic_name__ = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test batched
__magic_name__ = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_values
__magic_name__ = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__magic_name__ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__magic_name__ = np.asarray(__lowerCamelCase )
__magic_name__ = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_values
__magic_name__ = feature_extractor(__lowerCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def _snake_case ( self : Any ) -> List[str]:
__magic_name__ = self.feat_extract_tester.prepare_inputs_for_target()
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ = feat_extract.model_input_names[0]
__magic_name__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__lowerCamelCase ) == len(__lowerCamelCase ) for x, y in zip(__lowerCamelCase , processed_features[input_name] ) ) )
__magic_name__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__lowerCamelCase )
__magic_name__ = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
__magic_name__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__magic_name__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _snake_case ( self : List[str] ) -> Union[str, Any]:
__magic_name__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__lowerCamelCase )
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ = feat_extract.model_input_names[0]
__magic_name__ = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
__magic_name__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__magic_name__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _snake_case ( self : Dict ) -> Tuple:
__magic_name__ = self.feature_extraction_class(**self.feat_extract_dict )
__magic_name__ = self.feat_extract_tester.prepare_inputs_for_target()
__magic_name__ = feat_extract.model_input_names[0]
__magic_name__ = BatchFeature({input_name: speech_inputs} )
__magic_name__ = feat_extract.num_mel_bins # hack!
__magic_name__ = feat_extract.pad(__lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
__magic_name__ = feat_extract.pad(__lowerCamelCase , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
def _snake_case ( self : List[Any] ) -> str:
__magic_name__ = self.feat_extract_dict
__magic_name__ = True
__magic_name__ = self.feature_extraction_class(**__lowerCamelCase )
__magic_name__ = self.feat_extract_tester.prepare_inputs_for_target()
__magic_name__ = [len(__lowerCamelCase ) for x in speech_inputs]
__magic_name__ = feat_extract.model_input_names[0]
__magic_name__ = BatchFeature({input_name: speech_inputs} )
__magic_name__ = feat_extract.num_mel_bins # hack!
__magic_name__ = feat_extract.pad(__lowerCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __lowerCamelCase )
def _snake_case ( self : Any ) -> Optional[int]:
__magic_name__ = self.feat_extract_dict
__magic_name__ = True
__magic_name__ = self.feature_extraction_class(**__lowerCamelCase )
__magic_name__ = self.feat_extract_tester.prepare_inputs_for_target()
__magic_name__ = [len(__lowerCamelCase ) for x in speech_inputs]
__magic_name__ = feat_extract.model_input_names[0]
__magic_name__ = BatchFeature({input_name: speech_inputs} )
__magic_name__ = min(__lowerCamelCase )
__magic_name__ = feat_extract.num_mel_bins # hack!
__magic_name__ = feat_extract.pad(
__lowerCamelCase , padding="max_length" , max_length=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03,
3.05_18e-04, 9.15_53e-05, 3.35_69e-04, 9.76_56e-04, 1.83_11e-03,
2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04,
4.57_76e-04, 1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03,
7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03, 8.85_01e-04,
4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
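# --- Illustrative usage sketch (not part of the original test suite; assumes the
# `transformers` and `numpy` packages are installed and that `SpeechTaFeatureExtractor`
# is the SpeechT5 feature extractor exercised above) ---
#
#     import numpy as np
#     from transformers import SpeechTaFeatureExtractor
#
#     extractor = SpeechTaFeatureExtractor()
#     waveform = np.random.rand(16000).astype(np.float32)  # one second of fake audio
#     # waveform mode: zero-mean / unit-variance `input_values`
#     inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
#     # target mode: log-mel filter-bank frames of shape (batch, frames, num_mel_bins)
#     targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")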
| 721
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
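# The method above is the standard cache-consistency check: decoding the full sequence
# in one pass must match incremental decoding with `past_key_values` on a randomly
# selected output slice, up to numerical tolerance.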
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
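# Illustrative sketch (not part of the original tests): with a small LEDConfig,
# `prepare_led_inputs_dict` derives default masks from the pad token.
#
#     _cfg = LEDConfig(vocab_size=99, pad_token_id=1)
#     _ids = tf.constant([[5, 6, 7, 1, 1]])
#     _batch = prepare_led_inputs_dict(_cfg, _ids, _ids)
#     # _batch["attention_mask"] is 0 exactly where input_ids == pad_token_id:
#     # [[1, 1, 1, 0, 0]]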
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def _snake_case ( self : List[str] ) -> str:
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = tf.zeros_like(inputs_dict["attention_mask"] )
__magic_name__ = 2
__magic_name__ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
__magic_name__ = True
__magic_name__ = self.model_tester.seq_length
__magic_name__ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__lowerCamelCase : int ):
__magic_name__ = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__lowerCamelCase : Any ):
__magic_name__ = [t.numpy() for t in outputs.encoder_attentions]
__magic_name__ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__magic_name__ = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ = True
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
__magic_name__ = True
__magic_name__ = True
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _snake_case ( self : Union[str, Any] ) -> List[str]:
pass
def _snake_case ( self : int ) -> str:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 468
| 0
|
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal):
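    """
    Convert an integer (or integral float) to its hexadecimal string representation,
    prefixed with "0x" (or "-0x" for negative inputs).

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(15)
    '0xf'
    >>> decimal_to_hexadecimal(37)
    '0x25'
    >>> decimal_to_hexadecimal(0)
    '0x0'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    >>> decimal_to_hexadecimal(17.0)
    '0x11'
    """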
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    if hexadecimal == "":
        # 0 never enters the loop above; map it to "0" explicitly so the result is "0x0"
        hexadecimal = "0"
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def UpperCamelCase ( self : List[Any] ) -> float:
"""simple docstring"""
return 1e-5
@property
    def default_onnx_opset(self) -> int:
        return 12
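# Illustrative sketch (not part of the original module; assumes the public
# `transformers` package, where this config class is `TableTransformerConfig`):
#
#     from transformers import TableTransformerConfig
#
#     config = TableTransformerConfig(d_model=128, encoder_layers=2, decoder_layers=2)
#     assert config.hidden_size == 128      # attribute_map: hidden_size -> d_model
#     assert config.num_attention_heads == config.encoder_attention_heads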
| 203
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
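# Illustrative only: ids_tensor((2, 4), vocab_size=10) returns an int32 numpy array of
# shape (2, 4) with entries in [0, 9], e.g. [[3, 7, 0, 9], [1, 4, 4, 2]].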
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
lowerCamelCase__ = 0
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pt_model_class(SCREAMING_SNAKE_CASE__ ).eval()
lowerCamelCase__ = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , flax_model.params )
lowerCamelCase__ = flax_model.generate(SCREAMING_SNAKE_CASE__ ).sequences
lowerCamelCase__ = pt_model.generate(torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowerCamelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = True
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
lowerCamelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = False
lowerCamelCase__ = max_length
lowerCamelCase__ = 2
lowerCamelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = True
lowerCamelCase__ = max_length
lowerCamelCase__ = 0.8
lowerCamelCase__ = 10
lowerCamelCase__ = 0.3
lowerCamelCase__ = 1
lowerCamelCase__ = 8
lowerCamelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : List[str] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = max_length
lowerCamelCase__ = 1
lowerCamelCase__ = 8
lowerCamelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
lowerCamelCase__ = max_length
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 8
lowerCamelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase__ = False
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase__ = True
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCamelCase__ = 2
lowerCamelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = model.generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = jit(model.generate )
lowerCamelCase__ = jit_generate(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
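# Note (illustrative commentary, not from the original file): wrapping `model.generate`
# in `jax.jit`, as the mixin tests above do, compiles the whole sampling loop to XLA;
# those tests assert that the jitted and eager paths produce identical token sequences
# for each generation configuration they exercise.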
| 659
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler,
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds,
            ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                # classifier-free guidance: extrapolate from the unconditional prediction
                # toward the image-conditioned one by `guidance_scale`
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents,
            ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128,
            )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 659
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = "    def __init__(self, config):\n        super().__init__()\n        self.transform = BertPredictionHeadTransform(config)\n\n        # The output weights are the same as the input embeddings, but there is\n        # an output-only bias for each token.\n        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n        self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n        self.decoder.bias = self.bias\n\n    def forward(self, hidden_states):\n        hidden_states = self.transform(hidden_states)\n        hidden_states = self.decoder(hidden_states)\n        return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertEqual(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", REFERENCE_CODE, overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 48
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
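# Fine-tunes a library model on a token-classification task (NER, POS, chunking) using CoNLL-style data.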
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 61
| 0
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 426
|
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
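# A 5-way dense head on top of BigBird's QA outputs predicts the Natural Questions answer category.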
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # One-hot cross entropy over the last axis, with an optional reduction (e.g. jnp.mean).
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        # Pad every sequence to max_length and build the matching attention mask.
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
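# pmap-ed train step: each device computes loss and grads locally, then metrics and grads are averaged across the "batch" axis with pmean.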
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    # Linear warmup from init_lr to lr, then linear decay down to ~0.
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        # Exclude biases and LayerNorm scales from weight decay.
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 426
| 1
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
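# Entry point for the "datasets-cli" command-line tool: registers the subcommands and dispatches to the chosen one.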
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 540
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
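# In-place quicksort with a random pivot that also counts the number of comparisons performed.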
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 540
| 1
|
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
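# OWL-ViT configuration: separate text and vision tower configs, a combined model config, and an ONNX export config.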
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 538
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
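# The original DialoGPT checkpoints store the LM head weight under a different key; rename it so transformers can load the state dict.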
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 538
| 1
|
'''simple docstring'''
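# Tree sort: insert every element into a binary search tree, then read the values back with an in-order traversal.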
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 8
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 338
| 0
|
'''simple docstring'''
from __future__ import annotations
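# Project Euler problem 36: sum all numbers below one million that are palindromic in both base 10 and base 2.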
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 460
|
'''simple docstring'''
from math import factorial
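# Project Euler problem 34: find the sum of all numbers equal to the sum of the factorials of their digits.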
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    # 7 * 9! + 1 is an upper bound: an 8-digit number can never reach
    # the sum of the factorials of its digits.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 460
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
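# Converts the original FocalNet classification checkpoints to the transformers format and verifies the logits.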
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 48
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
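# Helper for the scheduler test below: verifies that every pipeline output has the same shape.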
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
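# Minimal end-to-end sketch of the two-stage generation exercised by the slow tests
# above (illustrative only; assumes a CUDA device and the same public checkpoints):
#
#   pipe = StableDiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#   ).to("cuda")
#   upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
#       "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
#   ).to("cuda")
#   latents = pipe("a photo of a cat", output_type="latent").images
#   image = upscaler(
#       prompt="a photo of a cat", image=latents, num_inference_steps=20, guidance_scale=0
#   ).images[0]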
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
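# Usage sketch (illustrative; downloads the checkpoint referenced in
# PRETRAINED_VOCAB_FILES_MAP above):
#
#   tokenizer = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   ids = tokenizer("Hello world").input_ids
#   text = tokenizer.decode(ids)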
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
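# Behaviour sketch for the helpers above (illustrative): a single sequence is wrapped
# as  <s> A </s>,  a pair as  <s> A </s> </s> B </s>,  and the token type ids are all
# zeros in both cases, mirroring the BART/RoBERTa convention that MVP follows.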
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
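# Usage sketch (illustrative; requires a real interactive terminal):
#
#   menu = BulletMenu("Which framework do you want to use?", ["pytorch", "tensorflow", "jax"])
#   choice_index = menu.run(default_choice=0)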
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
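# Worked example: comparing 2^10 with 3^9 compares res(2, 10) = 10*log10(2) ≈ 3.0103
# against res(3, 9) = 9*log10(3) ≈ 4.2941, so 3^9 (= 19683) is reported as the larger
# power without ever computing the powers themselves.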
def greatest_common_divisor(a, b):
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x, y):
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
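# Examples (both implementations agree):
#   greatest_common_divisor(24, 40)  -> 8
#   gcd_by_iterative(24, 40)         -> 8
#   greatest_common_divisor(0, 5)    -> 5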
'''simple docstring'''
from timeit import timeit
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_, lowerCamelCase_ : Any = self.image_processor_tester.get_expected_values(A , batched=A )
lowerCamelCase_ : Tuple = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ : List[str] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ (self ):
# Initialize image_processing
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ : Optional[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ (self ):
# prepare image and target
lowerCamelCase_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ : Any = json.loads(f.read() )
lowerCamelCase_ : str = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowerCamelCase_ : Optional[Any] = DetaImageProcessor()
lowerCamelCase_ : Optional[int] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase_ : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
lowerCamelCase_ : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase_ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase_ : int = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase_ : List[str] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowerCamelCase_ : Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase_ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def UpperCAmelCase__ (self ):
# prepare image, target and masks_path
lowerCamelCase_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowerCamelCase_ : Tuple = json.loads(f.read() )
lowerCamelCase_ : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowerCamelCase_ : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCamelCase_ : Any = DetaImageProcessor(format='''coco_panoptic''' )
lowerCamelCase_ : Dict = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase_ : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
lowerCamelCase_ : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase_ : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase_ : int = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase_ : Dict = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowerCamelCase_ : Tuple = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowerCamelCase_ : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase_ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use CLIPSeg to predict a segmentation mask for `text` on the input image
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
'''simple docstring'''
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
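# "num.txt" holds one large integer per line (Project Euler problem 13 ships one
# hundred 50-digit numbers); solution() returns the first ten digits of their sum.
# For example, a file containing "123\n456\n" would yield "579".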
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
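# Usage sketch (illustrative; exact tool call conventions may differ across versions):
#
#   translator = TranslationTool()
#   translator("This is a test", src_lang="English", tgt_lang="French")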
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : str = self.get_tokenizer()
__UpperCamelCase : Union[str, Any] = self.get_rust_tokenizer()
__UpperCamelCase : Any = self.get_image_processor()
__UpperCamelCase : str = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCamelCase : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCamelCase )
__UpperCamelCase : Union[str, Any] = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCamelCase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCamelCase )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Any = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, type(tokenizer_add_kwargs))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, type(image_processor_add_kwargs))

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
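A minimal standalone usage sketch for the processor exercised above; the checkpoint name is the published Chinese-CLIP model, while the caption and image URL are illustrative assumptions, not part of this test file:

from PIL import Image
import requests
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
# Produces the same keys the tests above assert on:
# input_ids, token_type_ids, attention_mask, pixel_values.
inputs = processor(text=["一只猫"], images=image, return_tensors="pt", padding=True)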
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
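The WordpieceTokenizer tests above exercise greedy longest-match-first subword splitting. A minimal sketch of that algorithm (a hypothetical helper for illustration, not the transformers implementation):

def wordpiece_tokenize(word: str, vocab: set, unk_token: str = "[UNK]") -> list:
    # Repeatedly take the longest prefix present in the vocabulary;
    # continuation pieces are looked up with a "##" prefix.
    tokens = []
    start = 0
    while start < len(word):
        end = len(word)
        current = None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            # No prefix matched: the whole word maps to the unknown token.
            return [unk_token]
        tokens.append(current)
        start = end
    return tokens


assert wordpiece_tokenize("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]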
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan):
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
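For reference, the scale_features / scale_to_features pair above is a plain min-max rescaling and its inverse. A minimal round-trip sketch with the same constants the pipeline registers (an illustrative check, separate from the pipeline code):

import math
import torch

MIN_VALUE, MAX_VALUE = math.log(1e-5), 4.0  # the pipeline's feature range
features = torch.tensor([MIN_VALUE, 0.0, MAX_VALUE])
zero_one = (features - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)  # map to [0, 1]
scaled = zero_one * 2.0 - 1.0                                # map to [-1, 1]
recovered = (scaled + 1.0) / 2.0 * (MAX_VALUE - MIN_VALUE) + MIN_VALUE
assert torch.allclose(recovered, features)                   # inverse recovers the input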
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
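For comparison, the same lazy-import behavior can be sketched with a PEP 562 module-level __getattr__. This is a simplified stand-in for illustration only, not the _LazyModule implementation:

import importlib

_LAZY_ATTRS = {"RobertaPreLayerNormConfig": ".configuration_roberta_prelayernorm"}

def __getattr__(name):
    # Import the submodule only when the attribute is first accessed.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")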
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
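A worked example of the linear learning-rate scaling rule applied above; all numbers here are illustrative assumptions, not values from the script:

base_lr = 1e-3
per_device_batch_size, grad_accumulation, world_size = 64, 2, 4
total_train_batch_size = per_device_batch_size * grad_accumulation * world_size  # 512
absolute_lr = base_lr * total_train_batch_size / 256                             # 2e-3
assert abs(absolute_lr - 2e-3) < 1e-12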
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'

    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
import math


def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
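The same difference has a closed form via the identities sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6. A quick equivalent sketch (the helper name is hypothetical):

def solution_closed_form(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


assert solution_closed_form(10) == 2640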
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
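A minimal end-to-end sketch of the iterator-streamer pattern these tests exercise, using the same tiny test checkpoint; the prompt and token budget are illustrative assumptions:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 5, "streamer": streamer})
thread.start()
text = "".join(streamer)  # consume pieces of text as they are generated
thread.join()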
"""simple docstring"""
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase = [[0] * n for i in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase = y_points[i]
for i in range(2 , SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
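Example usage (an assumed check, consistent with the function above): interpolating y = 2x at x = 5 from four exact samples returns 10.

value, table = neville_interpolate([1, 2, 3, 4], [2, 4, 6, 8], 5)
assert value == 10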
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
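Example usage (assumed values): with V = 10 and R = 5, the solver returns the missing current I = 2.

assert ohms_law(voltage=10, current=0, resistance=5) == {"current": 2.0}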
"""simple docstring"""
import argparse
import os
import re
_a : Dict = """src/diffusers"""
# Pattern that looks at the indentation in a line.
_a : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_a : Optional[Any] = re.compile(r'^\s*\"([^\"]+)\":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_a : Any = re.compile(r'^\s*_import_structure\[\"([^\"]+)\"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_a : Dict = re.compile(r'^\s*\"([^\"]+)\",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_a : Dict = re.compile(r'\[([^\]]+)\]')
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of the given `indent_level`, optionally starting
    after `start_prompt` and stopping at `end_prompt`."""
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a key function so that comparisons are case-insensitive and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Sorts the imports in a single import statement."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init file."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    # PATH_TO_TRANSFORMERS is the package source tree to walk; it is defined
    # earlier in this script.
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
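# Worked example (an illustrative addition, not part of the original script):
# given
#     names = ["load_model", "MODEL_MAPPING", "AutoModel", "_helper", "CONFIG_NAME"]
# sort_objects(names) returns
#     ["CONFIG_NAME", "MODEL_MAPPING", "AutoModel", "_helper", "load_model"]
# Constants sort first, classes second, functions last, and within each group
# names are compared with underscores stripped and case folded, so "_helper"
# sorts as "helper" and lands before "load_model" ("loadmodel").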
| 213
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
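# Hypothetical usage sketch (an addition; the BERT test file path is an
# assumption used only for illustration):
#
#     test_file = "tests/models/bert/test_modeling_bert.py"
#     mapping = get_model_to_tester_mapping(test_file)
#     print(to_json(mapping))
#     # e.g. {"BertModel": ["BertModelTester"], ...} -- to_json turns the
#     # class objects into plain names so the mapping is printable.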
| 559
| 0
|
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
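# Worked check (an addition; the numbers follow from the edges above): the
# cheapest 1 -> 4 route is 1 -> 3 -> 4 with weight 5 + 6 = 11, and the
# cheapest 0 -> 3 route is 0 -> 2 -> 3 with weight 9 + 7 = 16, so after
# graph.floyd_warshall() the table holds show_min(1, 4) == 11 and
# show_min(0, 3) == 16. The triple loop costs O(n^3) time over an O(n^2) table.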
| 673
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
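# Illustrative trace (an addition; the token ids are made up): when a batch
# entry is padded on the right, `global_attention_mask` is extended with -1 so
# it stays aligned with `input_ids`. 0 cannot be used as the fill value
# because it already means "local attention" for a real token.
#
#     before: input_ids = [0, 713, 2],       global_attention_mask = [1, 0, 0]
#     after:  input_ids = [0, 713, 2, 1, 1], global_attention_mask = [1, 0, 0, -1, -1]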
| 673
| 1
|
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string into A1Z26 numbers: 'a' -> 1 ... 'z' -> 26."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of A1Z26 numbers back into a lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
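# Quick worked example (an addition): since ord("a") == 97, subtracting 96
# maps "a" -> 1 through "z" -> 26, and adding 96 inverts it:
#
#     encode("hello") == [8, 5, 12, 12, 15]
#     decode([8, 5, 12, 12, 15]) == "hello"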
| 60
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __A :
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ) -> Optional[int]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def a__ (self , A , A , A , A , A , A , A ) -> Any:
"""simple docstring"""
_a = OpenLlamaModel(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Any:
"""simple docstring"""
_a = True
_a = OpenLlamaModel(A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , )
_a = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Dict:
"""simple docstring"""
_a = True
_a = True
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __A ( A , A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowerCamelCase : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaModelTester(self )
_a = ConfigTester(self , config_class=A , hidden_size=37 )
def a__ (self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''single_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''multi_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a__ (self , A ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ids_tensor([1, 10] , config.vocab_size )
_a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
_a = original_model(A ).last_hidden_state
_a = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = {'''type''': scaling_type, '''factor''': 10.0}
_a = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
_a = scaled_model(A ).last_hidden_state
_a = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
| 11
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix):
        return self + (-another)

    def __mul__(self, another: int | float | Matrix):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
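# Background note (an addition): sherman_morrison implements the rank-1 update
# formula with `self` playing the role of A^(-1):
#
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
#
# so updating a known inverse after a rank-1 change needs only matrix products
# and one scalar division instead of a fresh inversion.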
| 529
|
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
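# Worked default (an addition): with input_size=1, the default lags_sequence
# of length 7, and no static or dynamic extra features, the property gives
#     _number_of_features = sum([0]) + 0 + 0 + 0 + 1 * 2 = 2
# so feature_size = 1 * 7 + 2 = 9 input channels per time step.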
| 529
| 1
|
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    def __init__(self, config, num_labels=None, modal_hidden_size=2_048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 696
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1_024,
        d_embed=1_024,
        n_head=16,
        d_head=64,
        d_inner=4_096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1_600,
        clamp_len=1_000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self) -> int:
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
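# Illustrative note (an addition): the default cutoffs [20_000, 40_000, 200_000]
# split the vocabulary into four adaptive-softmax clusters (the remainder above
# the last cutoff forms the fourth), so tie_projs becomes
# [False, True, True, True]: every projection is shared except the first
# (head) cluster's.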
| 696
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73
|
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
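# Quick checks (an addition): values convert through joules, so
#     energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
#     energy_conversion("joule", "kilojoule", 500) == 0.5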
| 73
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 49
|
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def UpperCAmelCase ( A : int , A : Union[str, Any] , A : Union[str, Any] ):
import requests
monkeypatch.setattr(A , '''request''' , A )
SCREAMING_SNAKE_CASE : Optional[Any] = URL
if issubclass(A , A ):
SCREAMING_SNAKE_CASE : List[Any] = url
elif issubclass(A , A ):
SCREAMING_SNAKE_CASE : List[Any] = [url]
elif issubclass(A , A ):
SCREAMING_SNAKE_CASE : str = {'''train''': url}
SCREAMING_SNAKE_CASE : Any = '''dummy'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''downloads'''
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path
SCREAMING_SNAKE_CASE : List[str] = DownloadConfig(
cache_dir=os.path.join(A , A ) , use_etag=A , )
SCREAMING_SNAKE_CASE : Union[str, Any] = DownloadManager(dataset_name=A , download_config=A )
SCREAMING_SNAKE_CASE : List[str] = dl_manager.download(A )
SCREAMING_SNAKE_CASE : Dict = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(A , A ):
SCREAMING_SNAKE_CASE : List[str] = [downloaded_paths]
SCREAMING_SNAKE_CASE : int = [urls]
elif isinstance(A , A ):
assert "train" in downloaded_paths.keys()
SCREAMING_SNAKE_CASE : Optional[int] = downloaded_paths.values()
SCREAMING_SNAKE_CASE : Union[str, Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(A , A ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
SCREAMING_SNAKE_CASE : Dict = Path(A )
SCREAMING_SNAKE_CASE : Optional[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
SCREAMING_SNAKE_CASE : str = downloaded_path.read_text()
assert content == CONTENT
SCREAMING_SNAKE_CASE : str = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def UpperCAmelCase ( A : Tuple , A : Union[str, Any] , A : Dict ):
SCREAMING_SNAKE_CASE : List[str] = str(A )
if issubclass(A , A ):
SCREAMING_SNAKE_CASE : Optional[int] = filename
elif issubclass(A , A ):
SCREAMING_SNAKE_CASE : Optional[int] = [filename]
elif issubclass(A , A ):
SCREAMING_SNAKE_CASE : List[Any] = {'''train''': filename}
SCREAMING_SNAKE_CASE : Dict = '''dummy'''
SCREAMING_SNAKE_CASE : Optional[Any] = xz_file.parent
SCREAMING_SNAKE_CASE : List[Any] = '''extracted'''
SCREAMING_SNAKE_CASE : Optional[Any] = DownloadConfig(
cache_dir=A , use_etag=A , )
SCREAMING_SNAKE_CASE : Optional[int] = DownloadManager(dataset_name=A , download_config=A )
SCREAMING_SNAKE_CASE : Optional[int] = dl_manager.extract(A )
SCREAMING_SNAKE_CASE : Tuple = paths
for extracted_paths in [extracted_paths]:
if isinstance(A , A ):
SCREAMING_SNAKE_CASE : List[Any] = [extracted_paths]
SCREAMING_SNAKE_CASE : str = [paths]
elif isinstance(A , A ):
assert "train" in extracted_paths.keys()
SCREAMING_SNAKE_CASE : Optional[int] = extracted_paths.values()
SCREAMING_SNAKE_CASE : Union[str, Any] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(A , A ):
assert extracted_path == dl_manager.extracted_paths[input_path]
SCREAMING_SNAKE_CASE : Any = Path(A )
SCREAMING_SNAKE_CASE : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(A , etag=A )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
SCREAMING_SNAKE_CASE : int = extracted_path.read_text()
SCREAMING_SNAKE_CASE : Optional[int] = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCAmelCase ( A : Optional[int] , A : int ):
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(A , start=1 ):
SCREAMING_SNAKE_CASE : List[Any] = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def UpperCAmelCase ( A : Any , A : str ):
SCREAMING_SNAKE_CASE : Union[str, Any] = request.getfixturevalue(A )
SCREAMING_SNAKE_CASE : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(A ) , start=1 ):
_test_jsonl(A , A )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def UpperCAmelCase ( A : Dict , A : Optional[Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = request.getfixturevalue(A )
SCREAMING_SNAKE_CASE : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(A ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(A ) , start=1 ):
_test_jsonl(A , A )
assert num_tar == 1
assert num_jsonl == 2
def UpperCAmelCase ( A : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(A ) , start=1 ):
assert os.path.basename(A ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 527
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 539
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def lowerCamelCase ( _snake_case : Optional[int] ,_snake_case : Any ,_snake_case : List[Any]=None ):
'''simple docstring'''
lowercase__ = torch.load(_snake_case )
lowercase__ = WavLMConfigOrig(checkpoint["cfg"] )
lowercase__ = WavLMOrig(_snake_case )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
lowercase__ = WavLMConfig.from_pretrained(_snake_case )
else:
lowercase__ = WavLMConfig()
lowercase__ = WavLMModel(_snake_case )
recursively_load_weights(_snake_case ,_snake_case )
hf_wavlm.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
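# Example invocation (added; the script filename and all paths below are
# illustrative placeholders, not from the original file). The flags match the
# argparse definitions above; --config_path is optional and defaults to a
# fresh WavLMConfig.
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path /path/to/hf_wavlm \
#       --config_path /path/to/config.json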
| 539
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """A fast GPT-NeoX-20B tokenizer based on byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Rebuild the pre-tokenizer if the stored add_prefix_space setting differs.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate each conversation turn, appending the EOS token after each."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 381
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) -> Any:
"""simple docstring"""
snake_case__ : List[Any] =parent
snake_case__ : Union[str, Any] =batch_size
snake_case__ : List[str] =seq_length
snake_case__ : List[str] =is_training
snake_case__ : str =use_input_mask
snake_case__ : int =use_token_type_ids
snake_case__ : int =use_labels
snake_case__ : Union[str, Any] =vocab_size
snake_case__ : Dict =hidden_size
snake_case__ : Any =num_hidden_layers
snake_case__ : Any =num_attention_heads
snake_case__ : Optional[int] =intermediate_multiple_size
snake_case__ : List[str] =hidden_act
snake_case__ : List[str] =hidden_dropout
snake_case__ : Optional[int] =attention_dropout
snake_case__ : Union[str, Any] =weight_tying
snake_case__ : List[str] =max_position_embeddings
snake_case__ : Any =type_vocab_size
snake_case__ : Optional[Any] =type_sequence_label_size
snake_case__ : List[str] =initializer_range
snake_case__ : Tuple =num_labels
snake_case__ : Tuple =num_choices
snake_case__ : List[Any] =scope
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : List[str] =None
if self.use_input_mask:
snake_case__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : int =None
if self.use_labels:
snake_case__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Tuple =self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
snake_case__, snake_case__, snake_case__, snake_case__ : int =self.prepare_config_and_inputs()
snake_case__ : Optional[int] =True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
snake_case__ : Dict =GPTNeoXJapaneseModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Dict =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] =model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
snake_case__ : Optional[int] =True
snake_case__ : Dict =GPTNeoXJapaneseModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple =GPTNeoXJapaneseForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Tuple =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
snake_case__ : Tuple =True
snake_case__ : Tuple =GPTNeoXJapaneseForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# first forward pass
snake_case__ : List[str] =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : str =ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : Dict =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case__ : List[str] =torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : str =torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ : Union[str, Any] =model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] =output_from_no_past['''hidden_states'''][0]
snake_case__ : Dict =model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
snake_case__ : Dict =ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : List[Any] =output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Optional[Any] =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] =self.prepare_config_and_inputs()
snake_case__, snake_case__, snake_case__, snake_case__ : str =config_and_inputs
snake_case__ : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ =(GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCAmelCase__ =(GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ =(
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCAmelCase__ =False
lowerCAmelCase__ =False
lowerCAmelCase__ =False
lowerCAmelCase__ =False
def UpperCAmelCase ( self ) -> int:
"""simple docstring"""
snake_case__ : Optional[Any] =GPTNeoXJapaneseModelTester(self )
snake_case__ : List[str] =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__, snake_case__, snake_case__, snake_case__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> str:
"""simple docstring"""
snake_case__, snake_case__, snake_case__, snake_case__ : List[str] =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> str:
"""simple docstring"""
snake_case__, snake_case__, snake_case__, snake_case__ : List[str] =self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case__ : Optional[int] =None
self.model_tester.create_and_check_model_as_decoder(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
snake_case__, snake_case__, snake_case__, snake_case__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__ : Union[str, Any] ='''abeja/gpt-neox-japanese-2.7b'''
snake_case__ : str =['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
snake_case__ : Optional[int] =[
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
snake_case__ : Any =GPTNeoXJapaneseTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] =GPTNeoXJapaneseForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] =[]
for prompt in prompts:
snake_case__ : Union[str, Any] =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids
snake_case__ : Optional[Any] =model.generate(__SCREAMING_SNAKE_CASE , max_length=50 )
snake_case__ : str =tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
predicted_outputs += generated_string
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 381
| 1
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __a ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = AudioClassificationPipeline(model=__lowerCamelCase , feature_extractor=__lowerCamelCase )
# test with a raw waveform
lowercase = np.zeros((3_40_00,) )
lowercase = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def __a ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> Dict:
'''simple docstring'''
lowercase ,lowercase = examples
lowercase = audio_classifier(__lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
__lowerCamelCase , [
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
] , )
lowercase = audio_classifier(__lowerCamelCase , top_k=1 )
self.assertEqual(
__lowerCamelCase , [
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
] , )
self.run_torchaudio(__lowerCamelCase )
@require_torchaudio
def __a ( self : Dict , __lowerCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
import datasets
# test with a local file
lowercase = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
lowercase = dataset[0]['''audio''']['''array''']
lowercase = audio_classifier(__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
] , )
@require_torch
def __a ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = '''anton-l/wav2vec2-random-tiny-classifier'''
lowercase = pipeline('''audio-classification''' , model=__lowerCamelCase )
lowercase = np.ones((80_00,) )
lowercase = audio_classifier(__lowerCamelCase , top_k=4 )
        EXPECTED_OUTPUT = [
{'''score''': 0.0842, '''label''': '''no'''},
{'''score''': 0.0838, '''label''': '''up'''},
{'''score''': 0.0837, '''label''': '''go'''},
{'''score''': 0.0834, '''label''': '''right'''},
]
        EXPECTED_OUTPUT_PT_2 = [
{'''score''': 0.0845, '''label''': '''stop'''},
{'''score''': 0.0844, '''label''': '''on'''},
{'''score''': 0.0841, '''label''': '''right'''},
{'''score''': 0.0834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
lowercase = {'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
lowercase = audio_classifier(__lowerCamelCase , top_k=4 )
self.assertIn(nested_simplify(__lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __a ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
import datasets
lowercase = '''superb/wav2vec2-base-superb-ks'''
lowercase = pipeline('''audio-classification''' , model=__lowerCamelCase )
lowercase = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
        lowercase = np.array(dataset[3]['''speech'''] , dtype=np.float32 )
lowercase = audio_classifier(__lowerCamelCase , top_k=4 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def __a ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
| 702
|
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power S = V * I in a single-phase AC circuit (angles in degrees)."""
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
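    # Added usage sketch (not in the original file); assumes the apparent_power
    # signature defined above (angles in degrees).
    print(apparent_power(100, 5, 0, 0))   # (500+0j): 100 V and 5 A in phase
    print(apparent_power(100, 5, 90, 0))  # ≈ 500j: voltage leads current by 90 degrees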
| 479
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
_a = 42
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : Union[str, Any] , lowerCAmelCase : str = 32 , lowerCAmelCase : Dict = 64 , lowerCAmelCase : Union[str, Any] = 20 , lowerCAmelCase : List[Any] = 768 , lowerCAmelCase : List[Any]=77 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : Optional[Any] = 0.0 , lowerCAmelCase : str = "silu" , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = None , lowerCAmelCase : int = "linear" , lowerCAmelCase : str = "prd" , lowerCAmelCase : Tuple = None , lowerCAmelCase : Optional[Any] = None , lowerCAmelCase : Any = None , ):
super().__init__()
lowerCAmelCase = num_attention_heads
lowerCAmelCase = attention_head_dim
lowerCAmelCase = num_attention_heads * attention_head_dim
lowerCAmelCase = additional_embeddings
lowerCAmelCase = time_embed_dim or inner_dim
lowerCAmelCase = embedding_proj_dim or embedding_dim
lowerCAmelCase = clip_embed_dim or embedding_dim
lowerCAmelCase = Timesteps(_UpperCAmelCase , _UpperCAmelCase , 0 )
lowerCAmelCase = TimestepEmbedding(_UpperCAmelCase , _UpperCAmelCase , out_dim=_UpperCAmelCase , act_fn=_UpperCAmelCase )
lowerCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase )
if embedding_proj_norm_type is None:
lowerCAmelCase = None
elif embedding_proj_norm_type == "layer":
lowerCAmelCase = nn.LayerNorm(_UpperCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
lowerCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase )
if encoder_hid_proj_type is None:
lowerCAmelCase = None
elif encoder_hid_proj_type == "linear":
lowerCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
lowerCAmelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _UpperCAmelCase ) )
if added_emb_type == "prd":
lowerCAmelCase = nn.Parameter(torch.zeros(1 , 1 , _UpperCAmelCase ) )
elif added_emb_type is None:
lowerCAmelCase = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
lowerCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dropout=_UpperCAmelCase , activation_fn="""gelu""" , attention_bias=_UpperCAmelCase , )
for d in range(_UpperCAmelCase )
] )
if norm_in_type == "layer":
lowerCAmelCase = nn.LayerNorm(_UpperCAmelCase )
elif norm_in_type is None:
lowerCAmelCase = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
lowerCAmelCase = nn.LayerNorm(_UpperCAmelCase )
lowerCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
lowerCAmelCase = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , _UpperCAmelCase , persistent=_UpperCAmelCase )
lowerCAmelCase = nn.Parameter(torch.zeros(1 , _UpperCAmelCase ) )
lowerCAmelCase = nn.Parameter(torch.zeros(1 , _UpperCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowercase ( self : str ):
lowerCAmelCase = {}
def fn_recursive_add_processors(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ):
if hasattr(_UpperCAmelCase , """set_processor""" ):
lowerCAmelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return processors
def __lowercase ( self : Any , lowerCAmelCase : str ):
lowerCAmelCase = len(self.attn_processors.keys() )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(_UpperCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ):
if hasattr(_UpperCAmelCase , """set_processor""" ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
module.set_processor(_UpperCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __lowercase ( self : Dict ):
self.set_attn_processor(AttnProcessor() )
def __lowercase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : str = None , lowerCAmelCase : Optional[Any] = None , lowerCAmelCase : str = True , ):
lowerCAmelCase = hidden_states.shape[0]
lowerCAmelCase = timestep
if not torch.is_tensor(_UpperCAmelCase ):
lowerCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowerCAmelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCAmelCase = timesteps * torch.ones(_UpperCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
lowerCAmelCase = self.time_proj(_UpperCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowerCAmelCase = timesteps_projected.to(dtype=self.dtype )
lowerCAmelCase = self.time_embedding(_UpperCAmelCase )
if self.embedding_proj_norm is not None:
lowerCAmelCase = self.embedding_proj_norm(_UpperCAmelCase )
lowerCAmelCase = self.embedding_proj(_UpperCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowerCAmelCase = self.encoder_hidden_states_proj(_UpperCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
lowerCAmelCase = self.proj_in(_UpperCAmelCase )
lowerCAmelCase = self.positional_embedding.to(hidden_states.dtype )
lowerCAmelCase = []
lowerCAmelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(_UpperCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowerCAmelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowerCAmelCase = hidden_states[:, None, :]
lowerCAmelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowerCAmelCase = self.prd_embedding.to(hidden_states.dtype ).expand(_UpperCAmelCase , -1 , -1 )
additional_embeds.append(_UpperCAmelCase )
lowerCAmelCase = torch.cat(
_UpperCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowerCAmelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowerCAmelCase = F.pad(
_UpperCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
lowerCAmelCase = hidden_states + positional_embeddings
if attention_mask is not None:
lowerCAmelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
lowerCAmelCase = F.pad(_UpperCAmelCase , (0, self.additional_embeddings) , value=0.0 )
lowerCAmelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowerCAmelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
lowerCAmelCase = self.norm_in(_UpperCAmelCase )
for block in self.transformer_blocks:
lowerCAmelCase = block(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
lowerCAmelCase = self.norm_out(_UpperCAmelCase )
if self.prd_embedding is not None:
lowerCAmelCase = hidden_states[:, -1]
else:
lowerCAmelCase = hidden_states[:, additional_embeddings_len:]
lowerCAmelCase = self.proj_to_clip_embeddings(_UpperCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_UpperCAmelCase )
def __lowercase ( self : Optional[int] , lowerCAmelCase : Optional[Any] ):
lowerCAmelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 169
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
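# Example invocation (added; the script filename and paths are illustrative
# placeholders, not from the original file). The three flags are the required
# arguments declared above.
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_model.ckpt \
#       --albert_config_file ./albert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin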
| 23
| 0
|
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    """Factory used by the `transformers-cli env` subcommand."""
    return EnvironmentCommand()


def download_command_factory(args):
    """Factory that forwards the accelerate config file argument."""
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 719
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark the decorated function as the handler for the given key code."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: str):
    """Mark the decorated function as the handler for several key codes."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        # Collect every method marked with a key and index it by key code.
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Find and run the handler registered for the pressed character, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuild the class through the KeyHandler metaclass so key handlers are wired up."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 442
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]=7 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : List[str]=1_8 , lowerCAmelCase_ : Union[str, Any]=3_0 , lowerCAmelCase_ : int=4_0_0 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : List[Any]=None , ) -> Union[str, Any]:
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_0}
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = image_size
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
def lowercase ( self : Tuple ) -> Optional[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase ( self : Dict ) -> int:
__lowerCAmelCase = MobileNetVaImageProcessingTester(self )
@property
def lowercase ( self : List[Any] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'crop_size' ) )
def lowercase ( self : int ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
__lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def lowercase ( self : List[Any] ) -> str:
pass
def lowercase ( self : Any ) -> str:
# Initialize image_processing
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__lowerCAmelCase = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase ( self : List[Any] ) -> List[Any]:
# Initialize image_processing
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__lowerCAmelCase = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase ( self : List[Any] ) -> List[Any]:
# Initialize image_processing
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__lowerCAmelCase = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 53
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')

    parser.add_argument(
        '--config_file', default=None, help='The config file to use for the default values in the launching script.'
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': f"{pt_version} ({pt_cuda_available})",
        'PyTorch XPU available': str(pt_xpu_available),
        'PyTorch NPU available': str(pt_npu_available),
        'System RAM': f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()

    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([f"- {prop}: {val}" for prop, val in info.items()]))

    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info['Accelerate config'] = accelerate_config
    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
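# Usage note (added): this module implements the parser behind the `accelerate env`
# CLI subcommand; running the file directly prints the same environment table,
# which is what bug-report templates ask users to paste.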
| 53
| 1
|
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing `term`, total documents); documents are newline-separated."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: float, idf: float) -> float:
    """Combine a term frequency and an inverse document frequency score."""
    return round(tf * idf, 3)
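# Added worked example (not in the original file): ties the four helpers above
# together on a tiny made-up corpus of three newline-separated "documents".
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog ran\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")  # 1 occurrence
    df, n = document_frequency("cat", corpus)  # (2 docs containing "cat", 3 docs total)
    idf = inverse_document_frequency(df, n)    # round(log10(3 / 2), 3) == 0.176
    print(tf_idf(tf, idf))                     # 0.176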
| 719
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve y' = ode_func(x, y) with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
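    # Added usage sketch (not in the original file); assumes the explicit_euler
    # signature defined above. Solve y' = y with y(0) = 1 on [0, 1]; the exact
    # answer is e ≈ 2.71828, and forward Euler with h = 0.001 gives ≈ 2.7169.
    approx = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    print(approx[-1])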
| 624
| 0
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class __magic_name__ :
def __init__( self , _a = None , _a = None , _a=None , _a=None ) -> Dict:
if not conversation_id:
            lowerCAmelCase_ = uuid.uuid4()
if past_user_inputs is None:
lowerCAmelCase_ = []
if generated_responses is None:
lowerCAmelCase_ = []
lowerCAmelCase_ = conversation_id
lowerCAmelCase_ = past_user_inputs
lowerCAmelCase_ = generated_responses
lowerCAmelCase_ = text
def __eq__( self , _a ) -> List[str]:
if not isinstance(_a , _a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __a ( self , _a , _a = False ) -> int:
if self.new_user_input:
if overwrite:
                logger.warning(
                    f"User input added while unprocessed input already existed: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\"." )
lowerCAmelCase_ = text
else:
                logger.warning(
                    f"User input added while unprocessed input already existed: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input." )
else:
lowerCAmelCase_ = text
def __a ( self ) -> str:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase_ = None
def __a ( self , _a ) -> str:
self.generated_responses.append(_a )
def __a ( self ) -> int:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> List[str]:
lowerCAmelCase_ = f"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
lowerCAmelCase_ = '''user''' if is_user else '''bot'''
output += f"{name} >> {text} \n"
return output
@add_end_docstrings(
A__ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class __magic_name__ (A__ ):
def __init__( self , *_a , **_a ) -> int:
super().__init__(*_a , **_a )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase_ = self.tokenizer.eos_token
def __a ( self , _a=None , _a=None , _a=None , **_a ) -> List[str]:
lowerCAmelCase_ = {}
lowerCAmelCase_ = {}
lowerCAmelCase_ = {}
if min_length_for_response is not None:
lowerCAmelCase_ = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase_ = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase_ = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase_ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_a )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _a , _a=0 , **_a ) -> Dict:
lowerCAmelCase_ = super().__call__(_a , num_workers=_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
def __a ( self , _a , _a=32 ) -> Any:
if not isinstance(_a , _a ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
f"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"Add user inputs with the conversation\'s `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
lowerCAmelCase_ = self.tokenizer._build_conversation_input_ids(_a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase_ = self._legacy_parse_and_tokenize(_a )
if self.framework == "pt":
lowerCAmelCase_ = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase_ = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __a ( self , _a , _a=10 , **_a ) -> Optional[int]:
lowerCAmelCase_ = generate_kwargs.get("max_length" , self.model.config.max_length )
lowerCAmelCase_ = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
lowerCAmelCase_ = max_length - minimum_tokens
lowerCAmelCase_ = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase_ = model_inputs['''attention_mask'''][:, -trim:]
lowerCAmelCase_ = model_inputs.pop("conversation" )
lowerCAmelCase_ = max_length
lowerCAmelCase_ = self.model.generate(**_a , **_a )
if self.model.config.is_encoder_decoder:
lowerCAmelCase_ = 1
else:
lowerCAmelCase_ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __a ( self , _a , _a=True ) -> Optional[Any]:
lowerCAmelCase_ = model_outputs['''output_ids''']
lowerCAmelCase_ = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
lowerCAmelCase_ = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(_a )
return conversation
def __a ( self , _a ) -> Tuple:
lowerCAmelCase_ = self.tokenizer.eos_token_id
lowerCAmelCase_ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_a , add_special_tokens=_a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_a , add_special_tokens=_a ) )
if len(_a ) > self.tokenizer.model_max_length:
lowerCAmelCase_ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 122
|
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Return the resonant frequency f = 1 / (2*pi*sqrt(L*C)) of an LC circuit."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
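    # Added usage sketch (not in the original file); assumes the
    # resonant_frequency signature defined above. For L = 10 mH and C = 25 nF,
    # f = 1 / (2 * pi * sqrt(L * C)) ≈ 10.07 kHz.
    print(resonant_frequency(10e-3, 25e-9))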
| 608
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[int] =False
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ )
def __call__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = 2048 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase__ : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase : Optional[Any] =self.tokenizer
lowercase : str =self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase : List[str] =self.image_processor(
UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , max_patches=UpperCAmelCase__ , **UpperCAmelCase__ )
else:
# add pixel_values and bbox
lowercase : Optional[int] =self.image_processor(
UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ , **UpperCAmelCase__ )
if text is not None and not self.image_processor.is_vqa:
lowercase : Union[str, Any] =self.tokenizer(
text=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , stride=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_overflowing_tokens=UpperCAmelCase__ , return_special_tokens_mask=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_length=UpperCAmelCase__ , verbose=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ , )
if "attention_mask" in text_encoding:
lowercase : List[Any] =text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
lowercase : Union[str, Any] =text_encoding.pop('''input_ids''' )
else:
lowercase : Tuple =None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase__ )
return encoding_image_processor
def lowerCamelCase_ ( self : Tuple , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Dict ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =self.tokenizer.model_input_names
lowercase : int =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 700
|
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = """▁"""
UpperCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Optional[int] ='''<s>'''
lowercase : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_special_tokens(self):
        """Make sure the mask token round-trips through decode as expected."""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowercase : str ={'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = lowercase  # the dict literal above is bound to this name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 88
| 0
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve is a weighted sum of a set of control points; this class
    generates a curve in the 2-d plane from a list of (x, y) control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
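# Usage sketch: evaluating a curve point directly, without plotting. For the
# degree-1 curve through (1, 2) and (3, 5), t = 0.5 lands on the midpoint.
#
#   curve = BezierCurve([(1, 2), (3, 5)])
#   assert curve.basis_function(0.5) == [0.5, 0.5]
#   assert curve.bezier_curve_function(0.5) == (2.0, 3.5)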
| 143
|
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # assign `val` to the range [a, b], pushing pending lazy updates down first
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # max over the range [a, b], pushing pending lazy updates down first
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
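# Verification sketch: a brute-force cross-check of the lazy segment tree
# against a plain list, using the same 1-indexed [a, b] convention as above.
#
#   nums = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
#   segt2 = SegmentTree(size)
#   segt2.build(1, 1, size, nums)
#   assert segt2.query(1, 1, size, 4, 6) == max(nums[3:6])
#   segt2.update(1, 1, size, 1, 3, 111)
#   nums[0:3] = [111, 111, 111]
#   assert segt2.query(1, 1, size, 1, 15) == max(nums)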
| 224
| 0
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # heap helper: position of the parent of the current position
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap helper: position of the left child of the current position
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap helper: position of the right child of the current position
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
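# Usage sketch: a tiny triangle graph. Under this file's accumulated-distance
# update rule, the resulting tree keeps the two cheap edges and skips a-c.
#
#   g = GraphUndirectedWeighted()
#   g.add_edge("a", "b", 1)
#   g.add_edge("b", "c", 1)
#   g.add_edge("a", "c", 5)
#   dist, parent = prims_algo(g)
#   assert parent["b"] == "a" and parent["c"] == "b"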
| 584
|
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """Weighted undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
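# Input format note: the network file is a comma-separated adjacency matrix
# where "-" marks a missing edge, e.g. a hypothetical 3-vertex network:
#
#   -,16,12
#   16,-,17
#   12,17,-
#
# solution() returns the total weight saved by keeping only the MST edges.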
| 584
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: a deque replaces the heap because edge weights are only 0 or 1
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # a 0-weight edge keeps the distance, so its endpoint goes to the front
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
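# Usage sketch: a zero-weight detour 0 -> 1 -> 2 beats the direct weight-1
# edge, so the shortest 0 -> 2 distance is 0.
#
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)
#   g.add_edge(1, 2, 0)
#   g.add_edge(0, 2, 1)
#   assert g.get_shortest_path(0, 2) == 0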
| 484
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
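# Usage note: with the lazy-module pattern above, importing a name from this
# package only triggers the heavy submodule import on first access, e.g.:
#
#   from transformers.models.mobilebert import MobileBertConfig  # cheap
#   from transformers.models.mobilebert import MobileBertModel   # loads torch code on demand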
| 484
| 1
|
class Node:
    # BST data structure
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to `res`
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 711
|
def reverse_long_words(sentence: str) -> str:
    """Reverse all words that are longer than 4 characters in a sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 431
| 0
|
"""simple docstring"""
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 65
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 65
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for the zero-valued quantity among voltage, current and power."""
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage", power / current )
elif current == 0:
return result("current", power / voltage )
elif power == 0:
return result("power", float(round(abs(voltage * current ), 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
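# Usage sketch: exactly one argument is 0 and is solved for.
#
#   electric_power(voltage=0, current=2, power=5)   # result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)   # result(name='power', value=4.0)
#   electric_power(voltage=-2, current=3, power=0)  # result(name='power', value=6.0), i.e. |V * I|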
| 704
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max]): range to sample the target short edge from
            max_size (int): maximum allowed longest edge length
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
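# Usage sketch: _clip_box works in place on (x1, y1, x2, y2) boxes, with
# box_size given as (height, width).
#
#   boxes = torch.tensor([[-5.0, 10.0, 700.0, 900.0]])
#   _clip_box(boxes, (600, 800))
#   # boxes is now [[0.0, 10.0, 700.0, 600.0]]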
| 562
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 108
|
'''simple docstring'''
from typing import Any
def mode(input_list: list[Any]) -> list[Any]:
    """Return the sorted mode(s), i.e. the most frequent values, of input_list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
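# Usage sketch:
#
#   mode([2, 2, 3])        # [2]
#   mode([1, 1, 2, 2, 3])  # [1, 2] -- ties are all returned, sorted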
| 497
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
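# Usage sketch: the tool pipeline is encode -> forward -> decode, assuming the
# usual `PipelineTool.__call__` driver; the image path is illustrative only.
#
#   from PIL import Image
#
#   captioner = ImageCaptioningTool()
#   caption = captioner(Image.open("photo.jpg"))
#   print(caption)  # a short English description of the image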
| 612
|
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a parquet row-group size based on the feature types (smaller groups for large binary features)."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the dataset as Parquet to a binary file handle; the caller is
        responsible for opening and closing the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
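# Usage sketch: round-tripping a small dataset through parquet; the file name
# is illustrative only.
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ParquetDatasetWriter(ds, "tmp.parquet").write()
#   reloaded = ParquetDatasetReader("tmp.parquet", split="train").read()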
| 612
| 1
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected (height, width) of the processor output, mirroring the resize
        and size_divisor rounding logic of BridgeTowerImageProcessor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1_333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
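# Worked example of the resize math above (illustrative numbers): with shortest_edge=288
# and size_divisor=32, a 400x600 (h x w) image scales by 288/400 = 0.72 to 288x432;
# 432 <= int(1333/800 * 288) = 479, so no second rescale is applied, and both sides
# are already multiples of 32, giving expected (height, width) == (288, 432).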
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
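# Minimal usage sketch of the processor exercised by the tests above (hedged: the
# constructor arguments mirror the tester defaults; the PIL image is a placeholder):
#
#     processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32)
#     pixel_values = processor(images=[pil_image], return_tensors="pt").pixel_values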
| 43
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long, device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device
                )
                inputs_dict["mc_labels"] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
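# Sketch of what the hard-coded ids above encode (hedged; the tokenizer output is
# illustrative and not re-verified here):
#     tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#     input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#     model.generate(input_ids, do_sample=False)  # greedy decoding, matching expected_output_ids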
| 259
| 0
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
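# Minimal behaviour sketch for the `_pad` override above (hedged example; tensors are
# illustrative, not taken from a real checkpoint). With right padding:
#     enc = {"input_ids": [0, 713, 2], "attention_mask": [1, 1, 1],
#            "global_attention_mask": [1, 0, 0]}
#     tokenizer.pad(enc, padding="max_length", max_length=5)
# extends global_attention_mask to [1, 0, 0, -1, -1], because 0 already means
# "local attention" and -1 is reserved for padded positions.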
| 700
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2,
            output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2,
            output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        """Test that stable diffusion works with fp16."""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
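# Hedged usage sketch of the safe-latent-diffusion knobs the tests above exercise
# (parameter names are taken from the calls above; the checkpoint is illustrative):
#
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     out = pipe([prompt], sld_guidance_scale=2000, sld_warmup_steps=7,
#                sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7)
#     # sld_guidance_scale=0 disables safety guidance entirely.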
| 226
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify the conversion on an image from COCO
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights to our DeiT structure.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
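# Worked example of the resize ratio used above: for image_size=224,
# size = int((256 / 224) * 224) = 256, i.e. resize to 256 on the shorter side,
# then center-crop to 224. Illustrative invocation (the script filename and
# paths are placeholders, not verified here):
#     python convert_deit_timm_to_pytorch.py --deit_name vit_deit_base_distilled_patch16_224 \
#         --pytorch_dump_folder_path ./deit-base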
| 94
|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
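# Quick illustration of the API under test (a sketch; the records mirror the
# fixtures above):
#     ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
#     ds.column_names  # -> ["col_1", "col_2"]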
| 94
| 1
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 224
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the smallest
    term for which c > 10**k, when terms are written as a(i) = b * 10**k + c.
    Returns the difference between the current and last computed term, plus the
    number of terms jumped.
    """
    # ds_b - digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """Like next_term, but computes terms sequentially without memoizing results."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10**k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend

        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """Adds addend to the digit array given in digits, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Returns the n-th term of the sequence."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
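# Brute-force cross-check for small n (a sketch added for illustration; the sequence
# is a(n + 1) = a(n) + digitsum(a(n)) with a(1) = 1, i.e. 1, 2, 4, 8, 16, 23, ...):
def _naive_solution(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a
# e.g. _naive_solution(6) == 23, which the memoized solution(6) above should reproduce.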
| 224
| 1
|
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compares array[index1] and array[index2] and swaps them if needed for the given direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merges a bitonic sequence into a monotonic one."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sorts array[low:low+length] ascending (direction=1) or descending (direction=0)."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
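# Note (added for clarity): a classic bitonic network assumes the input length is a
# power of two; e.g. bitonic_sort([3, 1, 4, 2], 0, 4, 1) yields [1, 2, 3, 4], while
# non-power-of-two lengths are not guaranteed to sort correctly with this scheme.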
| 228
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
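# Hand check of the formula implemented above (illustrative numbers): perplexity is
# exp(mean negative log-likelihood), so for per-token log-probs [-2.0, -1.0, -3.0]
# the mean NLL is 2.0 and PPL = e**2 ≈ 7.389.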
| 352
| 0
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
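# Migration sketch (hedged: the checkpoint id is illustrative): code that did
#     PoolFormerFeatureExtractor.from_pretrained("sail/poolformer_s12")
# should move to
#     PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
# since the subclass above only exists to emit the deprecation warning.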
| 486
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Registers this command with argparse so it is available to transformers-cli."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
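    # Illustrative CLI invocation wired up by the registration above (paths are placeholders):
    #     transformers-cli convert --model_type bert \
    #         --tf_checkpoint ./bert_model.ckpt \
    #         --config ./bert_config.json \
    #         --pytorch_dump_output ./pytorch_model.bin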
    def __init__(
        self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str,
        finetuning_task_name: str, *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(UpperCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase__ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase_ = self._tf_checkpoint
UpperCAmelCase_ = """"""
else:
UpperCAmelCase_ = self._tf_checkpoint
UpperCAmelCase_ = """"""
convert_transfo_xl_checkpoint_to_pytorch(
__lowercase , self._config , self._pytorch_dump_output , __lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(UpperCamelCase__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 486
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage : float , current : float , power : float ) -> tuple:
    '''simple docstring'''
    result = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
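    # A quick sanity check (not part of the original file): pass 0 for the unknown
    # quantity and the function solves for it; the sample values are illustrative.
    assert electric_power(voltage=0, current=2, power=5).value == 2.5
    assert electric_power(voltage=2, current=4, power=0).value == 8.0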
| 78
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( PipelineTool ):
    default_checkpoint = """Salesforce/blip-image-captioning-base"""
    description = (
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    name = """image_captioner"""
    model_class = AutoModelForVision2Seq
    inputs = ["""image"""]
    outputs = ["""text"""]
    def __init__(self : Any , *args : Dict , **kwargs : Union[str, Any] ):
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def encode(self : Union[str, Any] , image : "Image" ):
        return self.pre_processor(images=image , return_tensors="pt" )
    def forward(self : List[str] , inputs : Dict ):
        return self.model.generate(**inputs )
    def decode(self : int , outputs : Optional[Any] ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
| 78
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
@pytest.fixture(scope='''session''' )
def zstd_path( tmp_path_factory ):
    path = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with zstd.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , '''w''' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def test_cached_path_extract( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def test_extracted_datasets_path( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , custom_extracted_dir )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    output_file = get_from_cache(F'tmp://{tmpfs_file}' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_cached_path_offline( ):
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_http_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('''https://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_ftp_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('''ftp://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_fsspec_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('''s3://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('''s3://huggingface.co''' )
| 107
|
import re
def dna( dna : str ) -> str:
    if len(re.findall('''[ATCG]''' , dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
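    # Example run (illustrative, not part of the original file): complementing a strand
    # maps A<->T and C<->G.
    print(dna("GCTA"))  # -> CGAT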
| 107
| 1
|
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class _a ( DiffusionPipeline):
    """simple docstring"""
    def __init__( self : Optional[int] , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , )->List[Any]:
super().__init__()
        if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                'to update the config accordingly as leaving `steps_offset` might lead to incorrect results'
                ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
                ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
                ' file'
            )
            deprecate('''steps_offset!=1''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''steps_offset'''] = 1
            scheduler._internal_dict = FrozenDict(new_config )
        if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                F'The configuration file of this scheduler: {scheduler} has not set the configuration'
                ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
                ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
                ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
                ' Hub, it would be very nice if you could open a Pull request for the'
                ' `scheduler/scheduler_config.json` file'
            )
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config['''skip_prk_steps'''] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self : str , slice_size : Optional[Union[str, int]] = "auto" )->List[str]:
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self : Tuple )->List[str]:
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self : Dict )->str:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self : Optional[int] )->Optional[int]:
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self : Union[str, Any] , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 5_1_2 , width : int = 5_1_2 , num_inference_steps : int = 5_0 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs : List[Any] , )->List[str]:
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 602
|
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , num_attention_heads : int = 16 , attention_head_dim : int = 88 , in_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 32 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , num_vector_embeds : Optional[int] = None , activation_fn : str = "geglu" , num_embeds_ada_norm : Optional[int] = None , ) -> Optional[int]:
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self : Tuple , hidden_states : Any , encoder_hidden_states : Tuple , timestep : Optional[int]=None , attention_mask : Tuple=None , cross_attention_kwargs : Any=None , return_dict : bool = True , ) -> Dict:
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
if not return_dict:
return (output_states,)
        return Transformer2DModelOutput(sample=output_states )
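if __name__ == "__main__":
    # A shape sanity-check sketch (illustrative, not part of the original file). The class
    # above is diffusers' dual-transformer block (upstream name: DualTransformer2DModel);
    # the sizes below are arbitrary small test values.
    import torch

    block = SCREAMING_SNAKE_CASE_(
        num_attention_heads=2 , attention_head_dim=8 , in_channels=16 , norm_num_groups=4 , cross_attention_dim=32 , )
    hidden_states = torch.randn(1 , 16 , 8 , 8 )
    # 77 + 257 context tokens, matching condition_lengths = [77, 257]
    encoder_hidden_states = torch.randn(1 , 77 + 257 , 32 )
    sample = block(hidden_states , encoder_hidden_states , return_dict=False )[0]
    print(sample.shape )  # torch.Size([1, 16, 8, 8])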
| 305
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __A( PipelineTool ):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]
def __init__( self, *A, **A ):
"""simple docstring"""
requires_backends(self, ['''vision'''] )
super().__init__(*__A, **__A )
    def encode( self, image, label ):
        """simple docstring"""
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors='''pt''' )
    def forward( self, inputs ):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self, outputs ):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
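if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original file): the tool class above is
    # transformers' image-segmentation tool (named `__A` in this dump); the input file
    # name is hypothetical, and the first call downloads the CLIPSeg checkpoint.
    from PIL import Image

    segmenter = __A()
    image = Image.open("cats.png")
    mask = segmenter(image, "cat")
    mask.save("cat_mask.png")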
| 718
|
def permute( nums ):
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permute2( nums ):
    def backtrack( start ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1 )
                nums[i], nums[start] = nums[start], nums[i]  # backtrack
    output = []
    backtrack(0 )
    return output
if __name__ == "__main__":
    import doctest
    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
doctest.testmod()
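    # Quick illustrative check (not part of the original file): both implementations
    # generate all 3! == 6 permutations of a three-element list.
    assert len(permute([1, 2, 3])) == len(permute2([1, 2, 3])) == 6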
| 105
| 0
|
'''simple docstring'''
def mf_knapsack( i , wt , val , j ):
    """simple docstring"""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack( w , wt , val , n ):
    """simple docstring"""
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution( w , wt , val ):
    """simple docstring"""
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples" )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                "All weights must be integers but got weight of "
                f'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )
    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution( dp , wt , i , j , optimal_set ):
    """simple docstring"""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 378
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config( PretrainedConfig ):
    model_type = """mobilenet_v2"""
    def __init__( self : Optional[int] , num_channels : int=3 , image_size : int=224 , depth_multiplier : float=1.0 , depth_divisible_by : int=8 , min_depth : int=8 , expand_ratio : int=6 , output_stride : int=32 , first_layer_is_expansion : bool=True , finegrained_output : bool=True , hidden_act : str="relu6" , tf_padding : bool=True , classifier_dropout_prob : float=0.8 , initializer_range : float=0.02 , layer_norm_eps : float=0.001 , semantic_loss_ignore_index : int=255 , **kwargs : str , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self : List[Any] ):
        '''simple docstring'''
        return OrderedDict([("pixel_values", {0: "batch"})] )
    @property
    def outputs( self : Optional[int] ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
    @property
    def atol_for_validation( self : List[str] ):
        '''simple docstring'''
        return 1e-4
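if __name__ == "__main__":
    # Illustrative sketch (not part of the original file): build a default config and
    # inspect the ONNX metadata defined above.
    config = MobileNetV2Config(depth_multiplier=1.4)
    onnx_config = MobileNetV2OnnxConfig(config)
    print(config.model_type)         # mobilenet_v2
    print(list(onnx_config.inputs))  # ['pixel_values']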
| 378
| 1
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open( *args : int , **kwargs : Dict ):
            pass
def hashimage( image ):
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask ):
    """simple docstring"""
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase ):
"""simple docstring"""
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self : Optional[Any] , model : Any , tokenizer : Union[str, Any] , processor : Any ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self : Any , mask_generator : Optional[Any] , examples : Optional[int] ):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf( self : int ):
        pass
@slow
@require_torch
    def test_small_model_pt( self : Dict ):
        image_segmenter = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_967},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_909},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_879},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_834},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_716},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_612},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_552},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_532},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_516},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_499},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_483},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_464},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_408},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_335},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_326},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_262},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_999},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_986},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_984},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_873},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold( self : Dict ):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation" , model=model_id )
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_210},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
] , )
| 718
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timesformer'''] = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81
| 0
|
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
SAMPLE_VOCAB = get_tests_dir("""fixtures/vocab.json""")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("""fixtures""")
class AutoProcessorTest( unittest.TestCase ):
    '''simple docstring'''
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_processor_from_model_shortcut( self ):
        processor = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_local_directory_from_model_config( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            processor.save_pretrained(tmpdirname )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_local_directory_from_extractor_config( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG , os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) )
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , 'vocab.json' ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_feat_extr_processor_class( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
            processor = Wav2Vec2Processor(feature_extractor , tokenizer )
            # save in new folder
            processor.save_pretrained(tmpdirname )
            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , 'r' ) as f:
                config_dict = json.load(f )
            config_dict.pop('processor_class' )
            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , 'w' ) as f:
                f.write(json.dumps(config_dict ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_tokenizer_processor_class( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
            processor = Wav2Vec2Processor(feature_extractor , tokenizer )
            # save in new folder
            processor.save_pretrained(tmpdirname )
            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , 'r' ) as f:
                config_dict = json.load(f )
            config_dict.pop('processor_class' )
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , 'w' ) as f:
                f.write(json.dumps(config_dict ) )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_processor_from_model_config_processor_class( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class='Wav2Vec2Processor' )
            model_config.save_pretrained(tmpdirname )
            # copy relevant files
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , 'vocab.json' ) )
            # create emtpy sample processor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , 'w' ) as f:
                f.write('{}' )
            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , Wav2Vec2Processor )
    def test_from_pretrained_dynamic_processor( self ):
        with self.assertRaises(ValueError ):
            processor = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            processor = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=False )
        processor = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=True )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=True , use_fast=False )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
    def test_new_processor_registration( self ):
        try:
            AutoConfig.register('custom' , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
            AutoProcessor.register(CustomConfig , CustomProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoProcessor.register(Wav2Vec2Config , Wav2Vec2Processor )
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir , 'vocab.txt' )
                with open(vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
                    vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
                tokenizer = CustomTokenizer(vocab_file )
                processor = CustomProcessor(feature_extractor , tokenizer )
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir )
                new_processor = AutoProcessor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_processor , CustomProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict( self ):
        class NewFeatureExtractor( Wav2Vec2FeatureExtractor ):
            '''simple docstring'''
            special_attribute_present = False
        class NewTokenizer( BertTokenizer ):
            '''simple docstring'''
            special_attribute_present = False
        class NewProcessor( ProcessorMixin ):
            '''simple docstring'''
            feature_extractor_class = '''AutoFeatureExtractor'''
            tokenizer_class = '''AutoTokenizer'''
            special_attribute_present = False
        try:
            AutoConfig.register('custom' , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoProcessor.register(CustomConfig , NewProcessor )
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
            self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=False )
            self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=True )
            self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_returns_tokenizer( self ):
        processor = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
        self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
    def test_auto_processor_returns_image_processor( self ):
        processor = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
        self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class ProcessorPushToHubTester( unittest.TestCase ):
    '''simple docstring'''
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
    @classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
except HTTPError:
pass
    def test_push_to_hub( self ):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , 'test-processor' ) , push_to_hub=True , use_auth_token=self._token )
            new_processor = Wav2Vec2Processor.from_pretrained(F'''{USER}/test-processor''' )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def test_push_to_hub_in_organization( self ):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir , 'test-processor-org' ) , push_to_hub=True , use_auth_token=self._token , organization='valid_org' , )
            new_processor = Wav2Vec2Processor.from_pretrained('valid_org/test-processor-org' )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_processor.feature_extractor , k ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def test_push_to_hub_dynamic_processor( self ):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR )
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , 'vocab.txt' )
            with open(vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
            processor = CustomProcessor(feature_extractor , tokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
            repo = Repository(tmp_dir , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
            processor.save_pretrained(tmp_dir )
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map , {
                    'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
                    'AutoProcessor': 'custom_processing.CustomProcessor',
                } , )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) ) as f:
                tokenizer_config = json.load(f )
            self.assertDictEqual(
                tokenizer_config['auto_map'] , {
                    'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
                    'AutoProcessor': 'custom_processing.CustomProcessor',
                } , )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , 'custom_feature_extraction.py' ) ) )
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , 'custom_tokenization.py' ) ) )
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir , 'custom_processing.py' ) ) )
            repo.push_to_hub()
            new_processor = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=True )
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
| 630
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_A = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNet2DModel
    main_input_name = '''sample'''
    @property
    def dummy_input( self ) -> Optional[int]:
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([1_0] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape( self ) -> Tuple:
        '''simple docstring'''
        return (3, 3_2, 3_2)
    @property
    def output_shape( self ) -> List[Any]:
        '''simple docstring'''
        return (3, 3_2, 3_2)
    def prepare_init_args_and_inputs_for_common( self ) -> Optional[int]:
        '''simple docstring'''
        init_dict = {
'''block_out_channels''': (3_2, 6_4),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 3_2,
}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNet2DModel
    main_input_name = '''sample'''
    @property
    def dummy_input( self ) -> Dict:
        '''simple docstring'''
        batch_size = 4
        num_channels = 4
        sizes = (3_2, 3_2)
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor([1_0] ).to(torch_device )
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape( self ) -> Any:
        '''simple docstring'''
        return (4, 3_2, 3_2)
    @property
    def output_shape( self ) -> int:
        '''simple docstring'''
        return (4, 3_2, 3_2)
    def prepare_init_args_and_inputs_for_common( self ) -> str:
        '''simple docstring'''
        init_dict = {
'''sample_size''': 3_2,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (3_2, 6_4),
'''attention_head_dim''': 3_2,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub( self ) -> List[Any]:
        '''simple docstring'''
        model, loading_info = UNet2DModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def test_from_pretrained_accelerate( self ) -> List[Any]:
        '''simple docstring'''
        model, _ = UNet2DModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=True )
        model.to(torch_device )
        image = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def test_from_pretrained_accelerate_wont_change_results( self ) -> List[str]:
        '''simple docstring'''
        model_accelerate, _ = UNet2DModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=True )
        model_accelerate.to(torch_device )
        model_accelerate.eval()
        noise = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([1_0] * noise.shape[0] ).to(torch_device )
        arr_accelerate = model_accelerate(noise , time_step )['''sample''']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNet2DModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''' , output_loading_info=True , low_cpu_mem_usage=False )
        model_normal_load.to(torch_device )
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise , time_step )['''sample''']
        assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1e-3 )
    def test_output_pretrained( self ) -> List[Any]:
        '''simple docstring'''
        model = UNet2DModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(torch_device )
        noise = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([1_0] * noise.shape[0] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-3 ) )
class NCSNppModelTests(snake_case, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
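
# Illustrative sketch (added for exposition, not part of the metric module):
# what the wrapped google-research scorer computes for one prediction/reference
# pair; the example strings are made up.
if __name__ == "__main__":
    _scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
    _scores = _scorer.score("the cat sat on the mat", "the cat lay on the mat")
    print(_scores["rouge1"].fmeasure)  # unigram-overlap F1, here 5/6 ≈ 0.83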
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
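
# Usage sketch (added, not part of the original file): CANINE needs no subword
# vocabulary -- tokens are single characters and ids are their Unicode
# codepoints, so the round trip is just ord()/chr():
#
#   tok = CanineTokenizer()
#   tok.tokenize("hi!")                              # ['h', 'i', '!']
#   [tok._convert_token_to_id(c) for c in "hi!"]     # [104, 105, 33]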
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
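
# Usage sketch (added, not part of the module): the defaults above describe the
# smallest (b0-style) encoder, e.g.
#
#   config = SegformerConfig()            # 4 stages with hidden sizes 32/64/160/256
#   onnx_config = SegformerOnnxConfig(config)
#   onnx_config.inputs                    # dynamic axes declared for "pixel_values"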
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
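
# Illustrative check (added for exposition; the tensor below is made up): for
# checkpoint_version >= 2.0 the function reshuffles a
# [num_heads * num_splits * hidden_size, :] layout into
# [num_splits * num_heads * hidden_size, :] without changing the values, e.g.
#
#   param = torch.arange(12.0).reshape(12, 1)  # num_heads=2, num_splits=3, hidden_size=2
#   out = fix_query_key_value_ordering(param, 2.0, 3, 2, 2)
#   assert out.shape == param.shape
#   assert sorted(out.flatten().tolist()) == sorted(param.flatten().tolist())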
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPTaConfig.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
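
# Usage sketch (added, not part of the module): the two classes above pair a
# model config with its ONNX export metadata, e.g.
#
#   onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
#   onnx_config.inputs    # dynamic-axis names for input_ids / attention_mask / decoder_*
#   onnx_config.generate_dummy_inputs(tokenizer, framework="pt")  # tokenizer: any loaded tokenizer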
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
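
# Usage sketch (added, not part of the module): this formatter is what backs
# datasets' "torch" output format, e.g.
#
#   ds = load_dataset("imdb", split="train")   # any dataset; the name is an example
#   ds = ds.with_format("torch")
#   ds[0]["label"]   # now a torch.Tensor (int64 by default, per _tensorize above)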
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def ugly_numbers(n: int) -> int:
    """Returns the n-th ugly number: positive integers whose only prime
    factors are 2, 3 and 5, generated in increasing order with three pointers.

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenates consecutive (src, tgt) pairs until either side
    would exceed max_tokens, then starts a new packed example."""
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
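
# Usage sketch (added): invocation from the command line; the script file name
# and paths below are made-up examples.
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed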
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests that carry neither an "integration" nor a "unit" marker as unit tests.
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
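
# Usage sketch (added): with the hooks above, tests without an explicit marker
# are treated as unit tests, so the suites can be split at the command line:
#
#   pytest -m unit          # only tests auto-marked as "unit"
#   pytest -m integration   # only tests explicitly marked "integration"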
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Returns all knight moves from ``position`` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]

    permissible_positions = []
    for y_test, x_test in positions:
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append((y_test, x_test))
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """A tour is complete once every square has been visited."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Recursive backtracking helper: tries every knight move from ``pos``."""
    if is_complete(board):
        return True

    for y, x in get_valid_pos(pos, len(board)):
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, (y, x), curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Finds an open knight's tour on an n x n board, trying every start square.

    >>> open_knight_tour(1)
    [[1]]
    """
    board = [[0 for _ in range(n)] for _ in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    raise ValueError(f"Open Knight Tour cannot be performed on a board of size {n}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
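
# Usage sketch (added): each cell of the returned board records the step at
# which the knight visited it, so a 5x5 tour contains 1..25 exactly once:
#
#   board = open_knight_tour(5)
#   sorted(x for row in board for x in row) == list(range(1, 26))   # True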
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
def __call__( self , a_ , a_ , a_ , a_ , a_ = 1.0 , a_ = True , a_ = False , ):
lowerCamelCase_ : int = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase_ : int = jnp.flip(a_ , axis=1 )
# 1. time
if not isinstance(a_ , jnp.ndarray ):
lowerCamelCase_ : List[str] = jnp.array([timesteps] , dtype=jnp.int32 )
elif isinstance(a_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase_ : str = timesteps.astype(dtype=jnp.float32 )
lowerCamelCase_ : Any = jnp.expand_dims(a_ , 0 )
lowerCamelCase_ : Optional[int] = self.time_proj(a_ )
lowerCamelCase_ : Any = self.time_embedding(a_ )
# 2. pre-process
lowerCamelCase_ : Union[str, Any] = jnp.transpose(a_ , (0, 2, 3, 1) )
lowerCamelCase_ : Tuple = self.conv_in(a_ )
lowerCamelCase_ : Tuple = jnp.transpose(a_ , (0, 2, 3, 1) )
lowerCamelCase_ : int = self.controlnet_cond_embedding(a_ )
sample += controlnet_cond
# 3. down
lowerCamelCase_ : str = (sample,)
for down_block in self.down_blocks:
if isinstance(a_ , a_ ):
lowerCamelCase_ : Dict = down_block(a_ , a_ , a_ , deterministic=not train )
else:
lowerCamelCase_ : Tuple = down_block(a_ , a_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase_ : List[str] = self.mid_block(a_ , a_ , a_ , deterministic=not train )
# 5. controlnet blocks
lowerCamelCase_ : Union[str, Any] = ()
for down_block_res_sample, controlnet_block in zip(a_ , self.controlnet_down_blocks ):
lowerCamelCase_ : Optional[int] = controlnet_block(a_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase_ : List[Any] = controlnet_down_block_res_samples
lowerCamelCase_ : Any = self.controlnet_mid_block(a_ )
# 6. scaling
lowerCamelCase_ : Optional[int] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=a_ , mid_block_res_sample=a_ )
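if __name__ == "__main__":
    # Hedged illustration (not part of the original module): the final "scaling"
    # step above multiplies every down-block residual and the mid-block residual
    # by `conditioning_scale` before they are consumed by the UNet. The shapes
    # below are made up purely for demonstration.
    import jax.numpy as jnp

    down_block_res_samples = (jnp.ones((1, 32, 32, 320)), jnp.ones((1, 16, 16, 640)))
    mid_block_res_sample = jnp.ones((1, 8, 8, 1280))
    conditioning_scale = 0.5  # assumed value
    down_block_res_samples = [s * conditioning_scale for s in down_block_res_samples]
    mid_block_res_sample = mid_block_res_sample * conditioning_scale
    print([s.shape for s in down_block_res_samples], mid_block_res_sample.shape)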
| 715
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "apply_ocr" ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCamelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCamelCase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a_ )
self.assertIsInstance(encoding.boxes , a_ )
# Test batched
lowerCamelCase_ : int = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowerCamelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Any = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# Initialize image_processing
lowerCamelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase_ : Union[str, Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self ):
# with apply_ocr = True
lowerCamelCase_ : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
lowerCamelCase_ : Optional[Any] = Image.open(ds[0]["file"] ).convert("RGB" )
lowerCamelCase_ : List[Any] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase_ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
# with apply_ocr = False
lowerCamelCase_ : List[str] = LayoutLMvaImageProcessor(apply_ocr=a_ )
lowerCamelCase_ : List[str] = image_processing(a_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
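if __name__ == "__main__":
    # Hedged usage sketch (not part of the test suite): how the processor under
    # test is typically driven outside unittest. `LayoutLMv3ImageProcessor` is
    # the assumed upstream transformers name; OCR is disabled here so that
    # pytesseract is not required for this snippet.
    from PIL import Image
    from transformers import LayoutLMv3ImageProcessor

    image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
    encoding = image_processor(Image.new("RGB", (640, 480)), return_tensors="pt")
    print(encoding.pixel_values.shape)  # expected: torch.Size([1, 3, 224, 224])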
| 73
| 0
|
class Graph:
    def __init__(self) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex) -> None:
        """Register `vertex` with an empty adjacency mapping if it is new."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight) -> None:
        """Add an undirected weighted edge; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self) -> None:
        """Bump duplicate weights so that every edge weight is distinct,
        as Boruvka's algorithm assumes."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of `graph` via Boruvka's algorithm
        (method name restored from the upstream version of this file)."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
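if __name__ == "__main__":
    # Small self-check (added for illustration): a triangle graph whose MST keeps
    # the two cheapest edges and drops the heaviest one.
    g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
    print(Graph.boruvka_mst(g))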
| 20
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : str ={"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCAmelCase : Dict ={
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
__lowerCAmelCase : Tuple ={
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
__lowerCAmelCase : Optional[int] ="""▁"""
class _A ( lowerCAmelCase ):
snake_case__ : Tuple = VOCAB_FILES_NAMES
snake_case__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
lowercase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
lowercase = len(self.sp_model ) - 1
lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase = [self.cls_token_id]
lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1]
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def A__ ( self ):
"""simple docstring"""
lowercase = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase = self.sp_model.PieceToId(__lowerCAmelCase )
return spm_id if spm_id else self.unk_token_id
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = []
lowercase = """"""
lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
lowercase = True
lowercase = []
else:
current_sub_tokens.append(__lowerCAmelCase )
lowercase = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def __getstate__( self ):
"""simple docstring"""
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
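# Hedged usage sketch (not part of the module): the tokenizer above is normally
# loaded through `from_pretrained` under its upstream name, assumed here to be
# `BarthezTokenizer`; it requires the `sentencepiece` package and network access.
#
#   from transformers import BarthezTokenizer
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   print(tokenizer("Bonjour le monde !").input_ids)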
| 359
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Download the demo image used for the conversion sanity checks."""
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    """Map original LAVIS parameter names to their transformers equivalents."""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Recompose the split q/v biases into a single qkv bias (the k bias is zero)."""
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    """Build the BLIP-2 config matching the given original model name."""
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
'''simple docstring'''
_UpperCAmelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
_UpperCAmelCase = tokenizer('\n' , add_special_tokens=_UpperCAmelCase ).input_ids[0]
_UpperCAmelCase , _UpperCAmelCase = get_blipa_config(_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
_UpperCAmelCase = BlipaForConditionalGeneration(_UpperCAmelCase ).eval()
_UpperCAmelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
_UpperCAmelCase , _UpperCAmelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
_UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = load_model_and_preprocess(
name=_UpperCAmelCase , model_type=_UpperCAmelCase , is_eval=_UpperCAmelCase , device=_UpperCAmelCase )
original_model.eval()
print('Done!' )
# update state dict keys
_UpperCAmelCase = original_model.state_dict()
_UpperCAmelCase = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_UpperCAmelCase = state_dict.pop(_UpperCAmelCase )
if key.startswith('Qformer.bert' ):
_UpperCAmelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
_UpperCAmelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
_UpperCAmelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
_UpperCAmelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
_UpperCAmelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
_UpperCAmelCase = key.replace('t5' , 'language' )
_UpperCAmelCase = val
# read in qv biases
read_in_q_v_bias(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = hf_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
assert len(_UpperCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_UpperCAmelCase = load_demo_image()
_UpperCAmelCase = vis_processors['eval'](_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_UpperCAmelCase )
# create processor
_UpperCAmelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase )
_UpperCAmelCase = BlipaProcessor(image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
_UpperCAmelCase = processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values.to(_UpperCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase )
original_model.to(_UpperCAmelCase )
hf_model.to(_UpperCAmelCase )
with torch.no_grad():
if "opt" in model_name:
_UpperCAmelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
_UpperCAmelCase = hf_model(_UpperCAmelCase , _UpperCAmelCase ).logits
else:
_UpperCAmelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
_UpperCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
_UpperCAmelCase = hf_model(_UpperCAmelCase , _UpperCAmelCase , labels=_UpperCAmelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_UpperCAmelCase = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_UpperCAmelCase )
assert torch.allclose(logits[0, :3, :3] , _UpperCAmelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_UpperCAmelCase = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_UpperCAmelCase )
else:
# cast to same type
_UpperCAmelCase = logits.dtype
assert torch.allclose(original_logits.to(_UpperCAmelCase ) , _UpperCAmelCase , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
_UpperCAmelCase = ''
_UpperCAmelCase = tokenizer(_UpperCAmelCase , return_tensors='pt' ).input_ids.to(_UpperCAmelCase )
_UpperCAmelCase = original_model.generate({'image': original_pixel_values} )
_UpperCAmelCase = hf_model.generate(
_UpperCAmelCase , _UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _UpperCAmelCase )
_UpperCAmelCase = input_ids.shape[1]
_UpperCAmelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_UpperCAmelCase )
_UpperCAmelCase = [text.strip() for text in output_text]
print('HF generation:' , _UpperCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_UpperCAmelCase )
hf_model.save_pretrained(_UpperCAmelCase )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
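# Hedged CLI sketch (not part of the script): a typical invocation, assuming the
# file is saved under its upstream name; the dump path is a placeholder.
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b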
| 639
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.')
    parser.add_argument(
        '--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument(
        '--max_length', type=int, default=5, help='The maximum total input sequence length after tokenization.')
    parser.add_argument(
        '--num_beams', type=int, default=None, help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ))
    parser.add_argument(
        '--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
    parser.add_argument(
        '--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument(
        '--device', type=str, default='cpu', help='Device where the model will be run')
    parser.add_argument('--output_file_path', type=str, default=None, help='Where to store the final ONNX file.')
    args = parser.parse_args()
    return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors='pt').to(model.device)
        summary_ids = model.generate(
            inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'],
            output_names=['output_ids'],
            dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            },
            example_outputs=summary_ids,
        )
        logger.info('Model exported to {}'.format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams),
                'max_length': np.array(max_length),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info('Model outputs from torch and ONNX Runtime are similar.')
        logger.info('Success.')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'
    logger.info('Exporting model to ONNX')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
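# Hedged CLI sketch (not part of the script): a typical invocation with
# placeholder values; the filename `run_onnx_exporter.py` is an assumption.
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path ./BART.onnx --device cpu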
| 639
| 1
|
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class _snake_case ( a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE : int = GPTSwaTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = 'This is a test'
lowerCAmelCase = 'This is a test'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = '<s>'
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 20_00 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
lowerCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowerCAmelCase = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
# fmt: off
self.assertListEqual(
_SCREAMING_SNAKE_CASE , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
lowerCAmelCase = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCAmelCase = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertListEqual(tokenizer.encode_fast(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Test that decode_fast returns the input text
for text, token_ids in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.decode_fast(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCAmelCase = {'input_ids': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='AI-Sweden/gpt-sw3-126m' , sequences=_SCREAMING_SNAKE_CASE , )
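# Hedged usage sketch (not part of the test file): outside the fixture model,
# the tokenizer would be loaded under its upstream transformers name, assumed
# here to be `GPTSw3Tokenizer`; requires sentencepiece and network access.
#
#   from transformers import GPTSw3Tokenizer
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   print(tokenizer("Det är inget fel på Mr. Cool").input_ids)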
| 284
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_UpperCamelCase : str = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
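# Hedged usage sketch (not part of the module): with the `_LazyModule` pattern
# above, the torch-backed classes are only imported on first attribute access,
# so importing the package stays cheap until a symbol is actually used:
#
#   from transformers import GPTNeoXJapaneseConfig  # resolved lazily
#   config = GPTNeoXJapaneseConfig()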
| 284
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class __lowerCamelCase ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Optional[Any]:
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=lowerCamelCase , )
assert hasattr(self , """env""" )
def lowerCAmelCase_ ( self , lowerCamelCase ) -> Optional[int]:
# configuration for running training on smdistributed Model Parallel
snake_case_ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
snake_case_ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
snake_case_ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
snake_case_ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase , py_version="""py36""" , )
def lowerCAmelCase_ ( self , lowerCamelCase ) -> Dict:
TrainingJobAnalytics(lowerCamelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def lowerCAmelCase_ ( self , lowerCamelCase ) -> str:
# create estimator
snake_case_ = self.create_estimator(lowerCamelCase )
# run training
estimator.fit()
# result dataframe
snake_case_ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case_ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
snake_case_ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case_ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase )
| 161
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """Load weights from a TensorFlow 2.x checkpoint into a PyTorch BERT model."""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''')
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
snake_case_ = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
snake_case_ = name[1:]
# figure out how many levels deep the name is
snake_case_ = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(lowercase_ )
# read data
snake_case_ = tf.train.load_variable(lowercase_ , lowercase_ )
names.append("""/""".join(lowercase_ ) )
arrays.append(lowercase_ )
logger.info(f'''Read a total of {len(lowercase_ ):,} layers''' )
# Sanity check
if len(set(lowercase_ ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(lowercase_ ) )})''' )
snake_case_ = list(set(lowercase_ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(lowercase_ , lowercase_ ):
snake_case_ = full_name.split("""/""" )
snake_case_ = model
snake_case_ = []
for i, m_name in enumerate(lowercase_ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
snake_case_ = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
snake_case_ = getattr(lowercase_ , """embeddings""" )
snake_case_ = getattr(lowercase_ , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
snake_case_ = getattr(lowercase_ , """encoder""" )
snake_case_ = getattr(lowercase_ , """layer""" )
snake_case_ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
snake_case_ = getattr(lowercase_ , """pooler""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
snake_case_ = getattr(lowercase_ , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
snake_case_ = getattr(lowercase_ , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
snake_case_ = getattr(lowercase_ , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
snake_case_ = getattr(lowercase_ , """token_type_embeddings""" )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
snake_case_ = getattr(lowercase_ , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
snake_case_ = getattr(lowercase_ , """attention""" )
snake_case_ = getattr(lowercase_ , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
snake_case_ = getattr(lowercase_ , """attention""" )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
snake_case_ = getattr(lowercase_ , """attention""" )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "_output_layer_norm":
# output LayerNorm
trace.extend(["""output""", """LayerNorm"""] )
snake_case_ = getattr(lowercase_ , """output""" )
snake_case_ = getattr(lowercase_ , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
snake_case_ = getattr(lowercase_ , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
snake_case_ = getattr(lowercase_ , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
snake_case_ = getattr(lowercase_ , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
snake_case_ = getattr(lowercase_ , """intermediate""" )
snake_case_ = getattr(lowercase_ , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
snake_case_ = getattr(lowercase_ , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
snake_case_ = getattr(lowercase_ , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
snake_case_ = getattr(lowercase_ , """weight""" )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
snake_case_ = """.""".join(lowercase_ )
if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowercase_ ) or re.match(
r"""(\S+)\.attention\.output\.dense\.weight""" , lowercase_ ):
snake_case_ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
snake_case_ = array.transpose()
if pointer.shape == array.shape:
snake_case_ = torch.from_numpy(lowercase_ )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
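    # A minimal usage sketch (the paths below are placeholders, not files that
    # ship with this script):
    #
    #   python convert_bert_tf2_checkpoint.py \
    #       --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
    #       --bert_config_file ./tf2_model/bert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin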
| 161
| 1
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
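    # Quick sanity check (a sketch, assuming the functions above): fibonacci(12)
    # == 144 is the first Fibonacci number with three digits, so
    # fibonacci_digits_index(3) == 12 and solution(3) == 12.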
| 494
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
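# A minimal usage sketch of the config above (the override values are made up):
#
#   config = GPTBigCodeConfig(n_layer=24, n_head=16)
#   assert config.hidden_size == config.n_embd  # resolved through attribute_map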
| 494
| 1
|
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
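# A minimal usage sketch (run in a real terminal; `handle_input` is provided by
# the `input.register` machinery above, and the choice strings are made up):
#
#   menu = BulletMenu("Choose a mixed precision mode:", ["no", "fp16", "bf16"])
#   selected = menu.run(default_choice=0)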
| 710
|
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
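    # For the demo sets above, the intersection is {'c', 'd', 'e'} (size 3) and
    # the union has 8 elements, so the script prints 3 / 8 = 0.375.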
| 302
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 508
|
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 508
| 1
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 369
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 369
| 1
|
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` starting from `starting_point` using the
    Newton-Raphson iteration x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n)."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 104
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 593
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 702
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1

        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)

        with self.assertRaises(AttributeError):
            _ = act2.a
| 469
| 0
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTests(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 288
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""", [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""", ["""default""", 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 288
| 1
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
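    # A quick usage sketch (made-up sample data): everything fits within the
    # capacity, so all fractions are 1 and the value is the full 25.
    #
    #   value = [1, 3, 5, 7, 9]
    #   weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    #   print(fractional_knapsack(value, weight, capacity=5))  # (25, [1, 1, 1, 1, 1])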
| 478
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters already in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 478
| 1
|
def find_min(arr: list[int]) -> int:
    """Return the minimum difference between the sums of a two-set partition of arr."""
    n = len(arr)
    s = sum(arr)

    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
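# Worked example (a sketch): find_min([1, 6, 11, 5]) returns 1, splitting the
# numbers into {1, 5, 6} (sum 12) and {11} (sum 11).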
| 68
|
def solution(limit: int = 1000000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 306
| 0
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
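    # Worked example on a small matrix (a sketch):
    #
    #   >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
    #   array([[ 6.,  8.],
    #          [14., 16.]])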
| 717
|
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
'''simple docstring'''
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
        def remove_copy_lines(path):
            with open(path, 'r') as f:
                lines = f.readlines()
            with open(path, 'w') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, 'w') as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.')
            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)

        replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py')
        os.rmdir(directory)
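        # For reference, `replace_in_files` parses a marker-based snippet file that the
        # cookiecutter template generates. An illustrative (hypothetical) fragment of
        # `to_replace_<model>.py` looks like:
        #
        #     # To replace in: "src/transformers/models/auto/configuration_auto.py"
        #     # Below: "# Add configs here"
        #     # Replace with:
        #     ("<model>", "<Model>Config"),
        #     # End.
        #
        # Everything between `# Replace with` and `# End.` is buffered in
        # `lines_to_copy` and inserted right after the `# Below:` anchor line
        # in the target file.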
| 22
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ):
snake_case_ = DebertaVaTokenizer
snake_case_ = DebertaVaTokenizerFast
snake_case_ = True
snake_case_ = True
def __magic_name__ ( self : List[str] ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : str =DebertaVaTokenizer(__lowercase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : str , __lowercase : Any ) -> Any:
SCREAMING_SNAKE_CASE__ : Any ='''this is a test'''
SCREAMING_SNAKE_CASE__ : Any ='''this is a test'''
return input_text, output_text
def __magic_name__ ( self : str ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Dict ='''<pad>'''
SCREAMING_SNAKE_CASE__ : Tuple =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def __magic_name__ ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(__lowercase ) , 3_00_01 )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def __magic_name__ ( self : Tuple ) -> str:
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] =''' \tHeLLo!how \n Are yoU? '''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =DebertaVaTokenizer(__lowercase , do_lower_case=__lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =DebertaVaTokenizerFast(__lowercase , do_lower_case=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def __magic_name__ ( self : Optional[Any] ) -> Tuple:
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def __magic_name__ ( self : str ) -> List[str]:
pass
def __magic_name__ ( self : Tuple ) -> Dict:
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
SCREAMING_SNAKE_CASE__ : List[str] =DebertaVaTokenizer(__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =DebertaVaTokenizerFast(__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : str =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : Optional[int] ) -> List[str]:
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] ='''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
SCREAMING_SNAKE_CASE__ : str =DebertaVaTokenizer(__lowercase , do_lower_case=__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : str =DebertaVaTokenizerFast(__lowercase , do_lower_case=__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : Any ) -> Optional[int]:
# fmt: off
SCREAMING_SNAKE_CASE__ : int ='''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ : Dict =['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =DebertaVaTokenizer(__lowercase , do_lower_case=__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =DebertaVaTokenizerFast(__lowercase , do_lower_case=__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : str =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> Dict:
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] ='''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ : int =['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] =DebertaVaTokenizer(__lowercase , do_lower_case=__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : Dict =tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =DebertaVaTokenizerFast(__lowercase , do_lower_case=__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : int =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : Optional[Any] ) -> Dict:
# fmt: off
SCREAMING_SNAKE_CASE__ : Dict =''' \tHeLLo!how \n Are yoU? '''
SCREAMING_SNAKE_CASE__ : Optional[int] =['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
SCREAMING_SNAKE_CASE__ : str =DebertaVaTokenizer(__lowercase , do_lower_case=__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : str =DebertaVaTokenizerFast(__lowercase , do_lower_case=__lowercase , split_by_punct=__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ : List[str] =self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : int ='''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : int =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''This is a test'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[13, 1, 43_98, 25, 21, 12_89]
SCREAMING_SNAKE_CASE__ : Optional[int] =['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =DebertaVaTokenizer(__lowercase , keep_accents=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =DebertaVaTokenizerFast(__lowercase , keep_accents=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Dict =rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : str =rust_tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
# fmt: off
SCREAMING_SNAKE_CASE__ : Tuple ='''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ : List[str] =[13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
SCREAMING_SNAKE_CASE__ : Tuple =['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
SCREAMING_SNAKE_CASE__ : Optional[int] =['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Any =tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
SCREAMING_SNAKE_CASE__ : str =rust_tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ : str =DebertaVaTokenizer(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =tokenizer.encode('''sequence builders''' )
SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''multi-sequence build''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.build_inputs_with_special_tokens(__lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __lowercase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __lowercase , )
@slow
def __magic_name__ ( self : Optional[int] ) -> List[str]:
# fmt: off
SCREAMING_SNAKE_CASE__ : Any ={'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 296
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_cpmant'] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
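# A short usage sketch (not part of the module): with the `_LazyModule` pattern
# above, the heavy torch-backed symbols are only imported on first attribute
# access, e.g.:
#
#     from transformers.models.cpmant import CpmAntConfig  # cheap: config only
#     from transformers.models.cpmant import CpmAntModel   # triggers the torch import lazily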
| 296
| 1
|
'''Wrapper that exposes timm vision models as Transformers-compatible backbones.'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, 'timm')
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError('backbone is not set in the config. Please set it to a timm model name.')
        if config.backbone not in timm.list_models():
            raise ValueError(f'backbone {config.backbone} is not supported by timm.')
        if hasattr(config, 'out_features') and config.out_features is not None:
            raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.')
        pretrained = getattr(config, 'use_pretrained_backbone', None)
        if pretrained is None:
            raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.')
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, 'out_indices', None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['module']: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ['vision', 'timm'])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop('config', TimmBackboneConfig())
        use_timm = kwargs.pop('use_timm_backbone', True)
        if not use_timm:
            raise ValueError('use_timm_backbone must be True for timm backbones')
        num_channels = kwargs.pop('num_channels', config.num_channels)
        features_only = kwargs.pop('features_only', config.features_only)
        use_pretrained_backbone = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone)
        out_indices = kwargs.pop('out_indices', config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # Empty init-weights hook to keep the class compatible with the library.
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('Cannot output attentions for timm backbones at the moment')
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
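# A minimal usage sketch (hedged: it assumes `timm` is installed and that
# "resnet18" is an available timm model name):
#
#     backbone = TimmBackbone.from_pretrained("resnet18", use_pretrained_backbone=False)
#     outputs = backbone(torch.randn(1, 3, 224, 224))
#     print([fm.shape for fm in outputs.feature_maps])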
| 665
|
'''Dynamic programming solution to the "abbreviation" string-matching problem.'''


def abbr(a: str, b: str) -> bool:
    """Return True if string `b` can be formed from `a` by capitalizing some of
    `a`'s lowercase letters and deleting all remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
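# A small worked example of the DP above: "daBcd" can be turned into "ABC" by
# capitalizing 'a' and 'c' and deleting the remaining lowercase 'd's, while
# "dBcd" cannot produce the leading 'A'.
if __name__ == "__main__":
    print(abbr("daBcd", "ABC"))  # True
    print(abbr("dBcd", "ABC"))  # False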
| 665
| 1
|
"""simple docstring"""
from math import sqrt
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(UpperCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCamelCase ( UpperCamelCase__ = 1_0001 ):
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(UpperCamelCase__ ):
count += 1
while count != nth:
number += 2
if is_prime(UpperCamelCase__ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''')
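# Quick sanity checks (a sketch) for the helpers above: the first six primes
# are 2, 3, 5, 7, 11, 13, so solution(6) must return 13.
if __name__ == "__main__":
    assert is_prime(2) and is_prime(13) and not is_prime(9)
    assert solution(6) == 13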
| 657
|
"""simple docstring"""
def __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 10 - x * x
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) >= 0:
raise ValueError("Wrong space!" )
_UpperCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
_UpperCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(UpperCamelCase__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(UpperCamelCase__ ) * equation(UpperCamelCase__ ) < 0:
_UpperCAmelCase = c
else:
_UpperCAmelCase = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
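# Convergence note (a sketch): each iteration halves the bracketing interval, so
# reaching width 0.01 from width (b - a) takes about log2((b - a) / 0.01)
# iterations; for [0, 6] that is ~10 iterations, converging to the positive root
# of 10 - x*x, i.e. sqrt(10) ~= 3.162.
if __name__ == "__main__":
    from math import log2
    print(f"expected iterations for [0, 6]: {log2(6 / 0.01):.1f}")  # ~9.2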
| 657
| 1
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase):
@classmethod
def lowerCamelCase__ ( cls ):
_snake_case : Union[str, Any] = TOKEN
HfFolder.save_token(snake_case_ )
@classmethod
def lowerCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def lowerCamelCase__ ( self ):
_snake_case : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_snake_case : List[Any] = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case_ , repo_id="test-config" , push_to_hub=snake_case_ , use_auth_token=self._token )
_snake_case : Tuple = BertConfig.from_pretrained(F'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_snake_case : List[Any] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case_ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case_ , use_auth_token=self._token )
_snake_case : List[Any] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
_snake_case : List[Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_snake_case : Optional[int] = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' , trust_remote_code=snake_case_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_snake_case : Optional[Any] = c.n_embd + 1 # int
_snake_case : Tuple = c.resid_pdrop + 1.0 # float
_snake_case : int = not c.scale_attn_weights # bool
_snake_case : str = c.summary_type + "foo" # str
c.update_from_string(
F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(snake_case_ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case_ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case_ , c.summary_type , "mismatch for key: summary_type" )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = PretrainedConfig()
_snake_case : Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
snake_case_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_snake_case : int = [key for key, value in config_common_kwargs.items() if value == getattr(snake_case_ , snake_case_ )]
if len(snake_case_ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F' {", ".join(snake_case_ )}.' )
def lowerCamelCase__ ( self ):
with self.assertRaises(snake_case_ ):
# config is in subfolder, the following should not work without specifying the subfolder
_snake_case : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_snake_case : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case_ )
def lowerCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
_snake_case : Optional[int] = mock.Mock()
_snake_case : Dict = 5_00
_snake_case : Union[str, Any] = {}
_snake_case : List[Any] = HTTPError
_snake_case : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
_snake_case : Optional[Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case_ ) as mock_head:
_snake_case : str = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
_snake_case : Union[str, Any] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = AutoConfig.from_pretrained("bert-base-cased" )
_snake_case : Dict = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case_ )
_snake_case : Optional[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case_ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_snake_case : List[Any] = AutoConfig.from_pretrained(snake_case_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_snake_case : Optional[Any] = ["config.42.0.0.json"]
_snake_case : str = 7_68
configuration.save_pretrained(snake_case_ )
shutil.move(os.path.join(snake_case_ , "config.4.0.0.json" ) , os.path.join(snake_case_ , "config.42.0.0.json" ) )
_snake_case : Any = AutoConfig.from_pretrained(snake_case_ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def lowerCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_snake_case : Optional[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_snake_case : Tuple = "v4.0.0"
_snake_case : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case_ , return_unused_kwargs=snake_case_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(snake_case_ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_snake_case : str = "v3.0.0"
_snake_case : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case_ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 707
|
"""simple docstring"""
from __future__ import annotations
import requests
_a : List[str] = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch `limit` posts from `subreddit`, keeping only the `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
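# The returned mapping is keyed by post index; an illustrative (hypothetical)
# inspection of its shape, assuming the request above succeeds:
#
#     posts = get_subreddit_data("learnpython", limit=2, wanted_data=["title", "url"])
#     for idx, fields in posts.items():
#         print(idx, fields["title"], fields["url"])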
| 87
| 0
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower and upper
    triangular matrices, with a unit diagonal on the lower factor."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
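# A quick verification sketch for the decomposition above: multiplying the
# returned factors should reconstruct the input matrix.
if __name__ == "__main__":
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)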
| 385
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC with a BERT tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
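# For reference: `accelerator.accumulate(model)` used above is roughly equivalent
# to the classic manual accumulation pattern sketched below (illustrative only,
# not the library's exact internals):
#
#     for step, batch in enumerate(train_dataloader):
#         loss = model(**batch).loss / gradient_accumulation_steps  # scale the loss
#         accelerator.backward(loss)
#         if (step + 1) % gradient_accumulation_steps == 0:  # sync and step every N batches
#             optimizer.step()
#             lr_scheduler.step()
#             optimizer.zero_grad()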
| 385
| 1
|
'''Project Euler problem 188: hyperexponentiation (tetration) of a number, keeping only the last digits.'''


def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Fast modular exponentiation: base**exponent mod modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of `base` tetrated `height` times."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'{solution() = }')
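# Sanity-check sketch for `_modexpt` above: with small inputs the recursion is
# easy to verify by hand, e.g. 3**5 = 243, so modulo 100 the result is 43.
if __name__ == "__main__":
    assert _modexpt(3, 5, 100) == pow(3, 5, 100) == 43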
| 609
|
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(snake_case , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(snake_case , "num_encoder_blocks" ) )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=6_4 , snake_case=3 , snake_case=4 , snake_case=[2, 2, 2, 2] , snake_case=[8, 4, 2, 1] , snake_case=[1_6, 3_2, 6_4, 1_2_8] , snake_case=[1, 4, 8, 1_6] , snake_case=[1, 2, 4, 8] , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=3 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : List[Any] = num_encoder_blocks
UpperCAmelCase : Dict = sr_ratios
UpperCAmelCase : Union[str, Any] = depths
UpperCAmelCase : Optional[Any] = hidden_sizes
UpperCAmelCase : Union[str, Any] = downsampling_rates
UpperCAmelCase : Tuple = num_attention_heads
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : Any = use_labels
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase : Any = attention_probs_dropout_prob
UpperCAmelCase : int = initializer_range
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Optional[Any] = scope
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase : int = self.get_config()
return config, pixel_values, labels
def A_ ( self ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = SegformerModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(snake_case )
UpperCAmelCase : List[str] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Dict = self.num_labels
UpperCAmelCase : Optional[Any] = SegformerForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : str = model(snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
UpperCAmelCase : Dict = model(snake_case , labels=snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = 1
UpperCAmelCase : List[Any] = SegformerForSemanticSegmentation(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(snake_case )
UpperCAmelCase : Optional[int] = model(snake_case , labels=snake_case )
self.parent.assertGreater(result.loss , 0.0 )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = config_and_inputs
UpperCAmelCase : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : int = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : int = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            # one attention map per transformer layer, summed over all encoder blocks
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also works using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            # one hidden state per encoder block
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # base models (in MODEL_MAPPING) have no head, hence no loss to train;
            # MODEL_MAPPING and get_values are assumed to be imported near the top of the file
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

# We verify our results on a fixture image of cats from the COCO samples
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
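

# The integration tests below load real checkpoints from the Hugging Face Hub and
# compare a small slice of the predicted logits against hard-coded reference values.
# They are marked @slow, so they are skipped unless slow tests are enabled.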
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        # resizing the predicted masks to a custom target size
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        # without target_sizes, the masks keep the logits' spatial resolution
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
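
# To run the slow integration tests locally (hypothetical path, adjust to where
# this file lives in your checkout):
#   RUN_SLOW=1 python -m pytest tests/models/segformer/test_modeling_segformer.py -k "IntegrationTest"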