| code (string, 87-55.2k chars) | code_codestyle (int64, 0-349) | style_context (string, 135-49.1k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
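# Regression tests for the seq2seq ROUGE helpers: score aggregation, newline handling for
# rougeLsum, and the file-based CLI entry point. PRED/TGT below are news-style summaries.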
PRED = [
'Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .'
' Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    """Requesting fewer ROUGE keys must not change the per-example rouge2 scores."""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )

def test_newline_cnn_improvement():
    """Separating sentences with newlines should improve rougeLsum on multi-sentence summaries."""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep

def test_newline_irrelevant_for_other_metrics():
    """newline_sep only matters for rougeLsum, not for rouge1/rouge2/rougeL."""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep

def test_single_sent_scores_dont_depend_on_newline_sep():
    """For single-sentence examples the newline separator has no effect at all."""
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)

def test_pegasus_newline():
    """Pegasus-style outputs separate sentences with "<n>", so scoring rougeLsum without newline splitting hurts."""
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score

def test_calculate_rouge_path():
    """The CLI helper accepts file paths and returns aggregated or per-example scores."""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_no_aggregation = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_no_aggregation, defaultdict)
| 207 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
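# Standard lazy-import pattern used across the library: the submodules listed in _import_structure
# are only imported when one of their attributes is first accessed, keeping top-level imports cheap.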
_import_structure = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_audio_spectrogram_transformer"] = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 92 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
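# Each helper below builds a second-order (biquad) IIRFilter from the classic Audio EQ Cookbook
# formulas: low-pass, high-pass, band-pass, all-pass, peaking and shelving filters.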
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt

def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt

def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt

def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)):
    """Create a 2nd-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt

def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a peaking (bell) filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt

def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a low-shelf filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt

def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)):
    """Create a high-shelf filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 297 |
import argparse
import os
import re
import packaging.version
UpperCamelCase__ = """examples/"""
REPLACE_PATTERNS = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
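# Each entry pairs a regex that locates the version string in a family of files with a replacement
# template; "VERSION" in the template is substituted with the release number at release time.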
REPLACE_FILES = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCamelCase__ = """README.md"""
def update_version_in_file(fname: str, version: str, pattern: str):
    """Rewrite the version string in `fname` using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , pattern="examples" )
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not patch:
update_version_in_examples(SCREAMING_SNAKE_CASE_ )
def _a ( ):
__lowerCAmelCase = "🤗 Transformers currently provides the following architectures"
__lowerCAmelCase = "1. Want to contribute a new model?"
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.readlines()
# Find the start of the list.
__lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
__lowerCAmelCase = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
def _a ( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
__lowerCAmelCase = f.read()
__lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(SCREAMING_SNAKE_CASE_ ).groups()[0]
return packaging.version.parse(SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : List[Any]=False ):
__lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
__lowerCAmelCase = default_version.base_version
elif patch:
__lowerCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__lowerCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__lowerCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
__lowerCAmelCase = default_version
print(F"""Updating version to {version}.""" )
global_version_update(SCREAMING_SNAKE_CASE_ , patch=SCREAMING_SNAKE_CASE_ )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def _a ( ):
__lowerCAmelCase = get_version()
__lowerCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
__lowerCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
__lowerCAmelCase = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(SCREAMING_SNAKE_CASE_ )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCamelCase__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 92 | 0 |
def bead_sort(sequence: list) -> list:
    """Bead sort ("gravity sort"); only valid for a list of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 252 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
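# Fast pipeline tests: a tiny randomly initialised Kandinsky img2img pipeline is assembled from
# dummy components below; the @slow tests at the bottom load the real checkpoints on GPU.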
class a__ ( snake_case__ , unittest.TestCase ):
_a : Dict = KandinskyImgaImgPipeline
_a : List[Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
_a : str = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
_a : List[Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_a : int = False
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 3_2
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 3_2
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.time_input_dim
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 1_0_0
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
__lowerCAmelCase = MultilingualCLIP(_A )
__lowerCAmelCase = text_encoder.eval()
return text_encoder
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__lowerCAmelCase = UNetaDConditionModel(**_A )
return model
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__lowerCAmelCase = DDIMScheduler(**_A )
__lowerCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
__lowerCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**_A )
__lowerCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(_A ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCAmelCase = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__lowerCAmelCase = "A red cartoon frog, 4k"
__lowerCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_A )
__lowerCAmelCase = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__lowerCAmelCase = pipeline(
_A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
| 92 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
__a = TypeVar("T")
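# Kruskal's algorithm: the minimum spanning tree is grown by scanning edges in increasing weight
# order and keeping an edge only if its endpoints lie in different union-find components.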
class UpperCAmelCase_ ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] , snake_case_ : Any ):
snake_case__ : Union[str, Any] = data
snake_case__ : str = self
snake_case__ : List[str] = 0
class UpperCAmelCase_ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Dict ):
snake_case__ : List[str] = {}
def lowerCamelCase ( self : Any , snake_case_ : List[Any] ):
snake_case__ : List[str] = DisjointSetTreeNode(_A )
def lowerCamelCase ( self : str , snake_case_ : Tuple ):
snake_case__ : Dict = self.map[data]
if elem_ref != elem_ref.parent:
snake_case__ : List[str] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def lowerCamelCase ( self : Tuple , snake_case_ : Any , snake_case_ : Tuple ):
if nodea.rank > nodea.rank:
snake_case__ : List[Any] = nodea
else:
snake_case__ : Dict = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def lowerCamelCase ( self : str , snake_case_ : List[Any] , snake_case_ : Any ):
self.link(self.find_set(_A ) , self.find_set(_A ) )
class UpperCAmelCase_ ( Generic[T] ):
"""simple docstring"""
def __init__( self : int ):
snake_case__ : List[str] = {}
def lowerCamelCase ( self : Tuple , snake_case_ : Optional[int] ):
if node not in self.connections:
snake_case__ : str = {}
def lowerCamelCase ( self : List[str] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Union[str, Any] ):
self.add_node(_A )
self.add_node(_A )
snake_case__ : Union[str, Any] = weight
snake_case__ : str = weight
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Any = []
snake_case__ : Union[str, Any] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda x: x[2] )
# creating the disjoint set
snake_case__ : int = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(_A )
# MST generation
snake_case__ : Tuple = 0
snake_case__ : Optional[Any] = 0
snake_case__ : Optional[Any] = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
snake_case__ , snake_case__ , snake_case__ : Any = edges[index]
index += 1
snake_case__ : Dict = disjoint_set.find_set(_A )
snake_case__ : int = disjoint_set.find_set(_A )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(_A , _A , _A )
disjoint_set.union(_A , _A )
return graph
| 35 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with three fixed priority levels; 0 is the highest priority."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Add an element to the queue for its priority level."""
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        """Return the oldest element from the highest-priority non-empty queue."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue in which the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue():
epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 92 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Optional[int] = logging.get_logger(__name__)
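# Conversion script: maps the original DETA checkpoint (Swin-L backbone) onto the Hugging Face
# DetaForObjectDetection layout, verifies logits/boxes on a test image, then optionally saves or pushes it.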
def snake_case ( UpperCAmelCase )-> Tuple:
"""simple docstring"""
__A = SwinConfig(
embed_dim=1_9_2 , depths=(2, 2, 1_8, 2) , num_heads=(6, 1_2, 2_4, 4_8) , window_size=1_2 , out_features=['stage2', 'stage3', 'stage4'] , )
__A = DetaConfig(
backbone_config=SCREAMING_SNAKE_CASE_ , num_queries=9_0_0 , encoder_ffn_dim=2_0_4_8 , decoder_ffn_dim=2_0_4_8 , num_feature_levels=5 , assign_first_stage=SCREAMING_SNAKE_CASE_ , with_box_refine=SCREAMING_SNAKE_CASE_ , two_stage=SCREAMING_SNAKE_CASE_ , )
# set labels
__A = 'huggingface/label-files'
if "o365" in model_name:
__A = 3_6_6
__A = 'object365-id2label.json'
else:
__A = 9_1
__A = 'coco-detection-id2label.json'
__A = num_labels
__A = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) ) , 'r' ) )
__A = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
__A = idalabel
__A = {v: k for k, v in idalabel.items()}
return config
def snake_case ( UpperCAmelCase )-> List[Any]:
"""simple docstring"""
__A = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under `old` to `new` in the state dict."""
    val = dct.pop(old)
    dct[new] = val
def snake_case ( UpperCAmelCase , UpperCAmelCase )-> Optional[Any]:
"""simple docstring"""
__A = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__A = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__A = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
__A = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__A = in_proj_weight[:dim, :]
__A = in_proj_bias[: dim]
__A = in_proj_weight[
dim : dim * 2, :
]
__A = in_proj_bias[
dim : dim * 2
]
__A = in_proj_weight[
-dim :, :
]
__A = in_proj_bias[-dim :]
# fmt: on
def snake_case ( UpperCAmelCase , UpperCAmelCase )-> Optional[int]:
"""simple docstring"""
# transformer decoder self-attention layers
__A = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__A = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
__A = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__A = in_proj_weight[:hidden_size, :]
__A = in_proj_bias[:hidden_size]
__A = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__A = in_proj_bias[hidden_size : hidden_size * 2]
__A = in_proj_weight[-hidden_size:, :]
__A = in_proj_bias[-hidden_size:]
def prepare_img():
    """Load the standard COCO test image of two cats."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> Any:
"""simple docstring"""
__A = get_deta_config(SCREAMING_SNAKE_CASE_ )
# load original state dict
if model_name == "deta-swin-large":
__A = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
__A = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'Model name {model_name} not supported' )
__A = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(SCREAMING_SNAKE_CASE_ , param.shape )
# rename keys
__A = create_rename_keys(SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_swin_q_k_v(SCREAMING_SNAKE_CASE_ , config.backbone_config )
read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__A = state_dict.pop(SCREAMING_SNAKE_CASE_ )
__A = val
if "input_proj" in key:
__A = state_dict.pop(SCREAMING_SNAKE_CASE_ )
__A = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__A = state_dict.pop(SCREAMING_SNAKE_CASE_ )
__A = val
# finally, create HuggingFace model and load state dict
__A = DetaForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
__A = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(SCREAMING_SNAKE_CASE_ )
# load image processor
__A = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
__A = prepare_img()
__A = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
__A = encoding['pixel_values']
__A = model(pixel_values.to(SCREAMING_SNAKE_CASE_ ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__A = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
__A = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
__A = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
__A = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(SCREAMING_SNAKE_CASE_ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(SCREAMING_SNAKE_CASE_ ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a__ : str = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 161 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
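# The model tester below builds a tiny DeiT configuration so the shared ModelTesterMixin tests run
# quickly on CPU; the integration tests at the bottom load the real distilled checkpoint.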
class a__ :
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 2
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = DeiTModel(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = DeiTForMaskedImageModeling(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = DeiTForMaskedImageModeling(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = DeiTForImageClassification(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = DeiTForImageClassification(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( snake_case__ , snake_case__ , unittest.TestCase ):
_a : Optional[Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_a : int = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_a : Optional[Any] = False
_a : Tuple = False
_a : Tuple = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DeiTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ):
"""simple docstring"""
__lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_A )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCAmelCase = False
__lowerCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__lowerCAmelCase = model_class(_A )
model.gradient_checkpointing_enable()
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_A ),
*get_values(_A ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
__lowerCAmelCase = problem_type["title"]
__lowerCAmelCase = problem_type["num_labels"]
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
if problem_type["num_labels"] > 1:
__lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
__lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_A ) as warning_list:
__lowerCAmelCase = model(**_A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DeiTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
_A )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_A , return_tensors="pt" ).to(_A )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**_A )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
__lowerCAmelCase = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_A , return_tensors="pt" )
__lowerCAmelCase = inputs.pixel_values.to(_A )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__lowerCAmelCase = model(_A )
| 92 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class _lowerCamelCase ( snake_case__ ):
UpperCAmelCase_ = ["""pixel_values"""]
def __init__(self , __a = True , __a = 32 , __a=PILImageResampling.BILINEAR , __a = True , **__a , ) -> List[str]:
UpperCamelCase = do_resize
UpperCamelCase = do_rescale
UpperCamelCase = size_divisor
UpperCamelCase = resample
super().__init__(**_A )
def snake_case_ (self , __a , __a , __a , __a = None , **__a ) -> Optional[int]:
UpperCamelCase , UpperCamelCase = get_image_size(_A )
# Rounds the height and width down to the closest multiple of size_divisor
UpperCamelCase = height // size_divisor * size_divisor
UpperCamelCase = width // size_divisor * size_divisor
UpperCamelCase = resize(_A , (new_h, new_w) , resample=_A , data_format=_A , **_A )
return image
def snake_case_ (self , __a , __a , __a = None , **__a ) -> Tuple:
return rescale(image=_A , scale=_A , data_format=_A , **_A )
def snake_case_ (self , __a , __a = None , __a = None , __a=None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> Any:
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = size_divisor if size_divisor is not None else self.size_divisor
UpperCamelCase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(_A ) for img in images]
if do_resize:
UpperCamelCase = [self.resize(_A , size_divisor=_A , resample=_A ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(_A , scale=1 / 2_55 ) for image in images]
UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCamelCase = {"pixel_values": images}
return BatchFeature(data=_A , tensor_type=_A )
| 153 |
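The processor above shrinks each image so that both sides become exact multiples of `size_divisor` before resizing. A minimal sketch of that rounding step in isolation (the helper name and the sample sizes are mine, not part of the processor API):

def round_down_to_multiple(height, width, size_divisor):
    # Floor-divide then multiply back: the remainder is dropped, so both
    # dimensions become exact multiples of size_divisor (same arithmetic as
    # the resize method above).
    new_h = height // size_divisor * size_divisor
    new_w = width // size_divisor * size_divisor
    return new_h, new_w

# a 480x639 input with size_divisor=32 is mapped to 480x608
assert round_down_to_multiple(480, 639, 32) == (480, 608)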
def _a ( SCREAMING_SNAKE_CASE_ : int = 1_00_00_00 ):
__lowerCAmelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , SCREAMING_SNAKE_CASE_ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 92 | 0 |
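The snippet just above is a totient sieve: `phi[i]` starts at `i - 1`, and any index still untouched when the outer loop reaches it must be prime, so its multiples lose a `1/i` share of their count; the final answer is the sum of `phi(i)` for `i` from 2 to the limit. A small self-check of the same sieve against a brute-force totient (the function name and the gcd-based check are mine):

from math import gcd

def phi_sieve(limit):
    # identical sieve to the solution above
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i has not been reduced yet, so it is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return phi

# phi[n] should equal Euler's totient of n for every n >= 2
phi = phi_sieve(50)
for n in range(2, 51):
    assert phi[n] == sum(1 for k in range(1, n) if gcd(k, n) == 1)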
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=snake_case__ ):
__UpperCAmelCase : Tuple = ["""onnx"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["onnx"] )
@classmethod
def lowerCamelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["onnx"] )
@classmethod
def lowerCamelCase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["onnx"] )
| 203 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 92 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[int] = logging.get_logger(__name__)
a : Any = torch.device('cpu')
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE_, stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
snake_case_ = dct.pop(SCREAMING_SNAKE_CASE_ )
snake_case_ = val
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = []
for k in state_dict.keys():
snake_case_ = k
if ".pwconv" in k:
snake_case_ = k_new.replace('''.pwconv''', '''.point_wise_conv''' )
if ".dwconv" in k:
snake_case_ = k_new.replace('''.dwconv''', '''.depth_wise_conv''' )
if ".Proj." in k:
snake_case_ = k_new.replace('''.Proj.''', '''.proj.''' )
if "patch_embed" in k_new:
snake_case_ = k_new.replace('''patch_embed''', '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
snake_case_ = k_new.split('''.''' )
if ls[2].isdigit():
snake_case_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
snake_case_ = k_new.replace('''network''', '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Dict:
'''simple docstring'''
snake_case_ = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
snake_case_ = 1000
snake_case_ = '''huggingface/label-files'''
snake_case_ = '''imagenet-1k-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, repo_type='''dataset''' ), '''r''' ) )
snake_case_ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
snake_case_ = [3, 3, 6, 4]
snake_case_ = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
snake_case_ = [3, 3, 9, 6]
snake_case_ = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
snake_case_ = [4, 3, 10, 5]
snake_case_ = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
snake_case_ = [4, 4, 12, 6]
snake_case_ = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
snake_case_ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_, map_location='''cpu''', check_hash=SCREAMING_SNAKE_CASE_ )
else:
snake_case_ = torch.load(SCREAMING_SNAKE_CASE_, map_location='''cpu''' )
snake_case_ = checkpoint
snake_case_ = create_rename_keys(SCREAMING_SNAKE_CASE_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
snake_case_ = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# prepare test inputs
snake_case_ = prepare_img()
snake_case_ = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
snake_case_ = processor(images=SCREAMING_SNAKE_CASE_, return_tensors='''pt''' )
# compare outputs from both models
snake_case_ = get_expected_output(SCREAMING_SNAKE_CASE_ )
snake_case_ = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5], SCREAMING_SNAKE_CASE_, atol=1e-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
a : Any = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 56 |
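The conversion script above works by building `(old_key, new_key)` pairs and then popping each tensor out of the state dict and re-inserting it under its new name. A stripped-down sketch of that rename pattern on a plain dict (the keys shown are illustrative stand-ins, not the exact SwiftFormer checkpoint keys):

def rename_key(state_dict, old, new):
    # pop the value under its original name and re-insert it under the new one
    state_dict[new] = state_dict.pop(old)

checkpoint = {"network.0.0.dwconv.weight": "tensor_a", "patch_embed.0.weight": "tensor_b"}
renames = [
    ("network.0.0.dwconv.weight", "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight"),
    ("patch_embed.0.weight", "swiftformer.patch_embed.patch_embedding.0.weight"),
]
for old, new in renames:
    rename_key(checkpoint, old, new)

assert "patch_embed.0.weight" not in checkpoint
assert checkpoint["swiftformer.patch_embed.patch_embedding.0.weight"] == "tensor_b"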
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCamelCase__ = get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=0 ):
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCAmelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCAmelCase = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F"""Saving model to {ckpt_dir}""" )
__lowerCAmelCase = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__lowerCAmelCase = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Loading model from {input_model_file}""" )
__lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCAmelCase = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Loading model from {input_model_file}""" )
__lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCAmelCase = (
os.path.join(SCREAMING_SNAKE_CASE_ , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
__lowerCAmelCase = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , )
__lowerCAmelCase = state_dict["model"]
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str=0 ):
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCAmelCase = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCAmelCase = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCAmelCase = None
# the below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCAmelCase = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
__lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
else:
__lowerCAmelCase = (
os.path.join(SCREAMING_SNAKE_CASE_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
__lowerCAmelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , )
__lowerCAmelCase = optim_state["optimizer"]
logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
__lowerCAmelCase = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
| 92 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a_ ( __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return EnvironmentCommand()
class __UpperCamelCase ( snake_case__ ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =parser.add_parser('''env''' )
download_parser.set_defaults(func=_A )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =huggingface_hub.__version__
lowerCamelCase_ ='''not installed'''
lowerCamelCase_ ='''NA'''
if is_torch_available():
import torch
lowerCamelCase_ =torch.__version__
lowerCamelCase_ =torch.cuda.is_available()
lowerCamelCase_ ='''not installed'''
if is_transformers_available():
import transformers
lowerCamelCase_ =transformers.__version__
lowerCamelCase_ ='''not installed'''
if is_accelerate_available():
import accelerate
lowerCamelCase_ =accelerate.__version__
lowerCamelCase_ ='''not installed'''
if is_xformers_available():
import xformers
lowerCamelCase_ =xformers.__version__
lowerCamelCase_ ={
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_A ) )
return info
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 75 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a__ ( snake_case__ ):
def __init__( self , *_A , _A=None , _A=None , **_A ):
"""simple docstring"""
super().__init__(*_A , **_A )
__lowerCAmelCase = eval_examples
__lowerCAmelCase = post_process_function
def __SCREAMING_SNAKE_CASE( self , _A = None , _A=None , _A = None , _A = "eval" , **_A , ):
"""simple docstring"""
__lowerCAmelCase = gen_kwargs.copy()
__lowerCAmelCase = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
__lowerCAmelCase = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
__lowerCAmelCase = gen_kwargs
__lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
__lowerCAmelCase = self.get_eval_dataloader(_A )
__lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowerCAmelCase = self.compute_metrics
__lowerCAmelCase = None
__lowerCAmelCase = time.time()
__lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowerCAmelCase = eval_loop(
_A , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
__lowerCAmelCase = compute_metrics
__lowerCAmelCase = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__lowerCAmelCase = self.post_process_function(_A , _A , _A )
__lowerCAmelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__lowerCAmelCase = metrics.pop(_A )
metrics.update(output.metrics )
else:
__lowerCAmelCase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A )
return metrics
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=None , _A = "test" , **_A ):
"""simple docstring"""
__lowerCAmelCase = gen_kwargs.copy()
__lowerCAmelCase = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
__lowerCAmelCase = self.compute_metrics
__lowerCAmelCase = None
__lowerCAmelCase = time.time()
__lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowerCAmelCase = eval_loop(
_A , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
__lowerCAmelCase = compute_metrics
__lowerCAmelCase = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowerCAmelCase = self.post_process_function(_A , _A , _A , "predict" )
__lowerCAmelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__lowerCAmelCase = metrics.pop(_A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
| 92 | 0 |
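Both `evaluate` and `predict` above end by prefixing every metric key with the `metric_key_prefix`, so that e.g. `rouge2` becomes `eval_rouge2` while already-prefixed keys are left alone. The same loop in isolation (the helper name is mine):

def prefix_metric_keys(metrics, prefix):
    # Rename every metric key so it starts with "<prefix>_", mirroring the
    # post-processing loop in evaluate()/predict() above.
    for key in list(metrics.keys()):
        if not key.startswith(f"{prefix}_"):
            metrics[f"{prefix}_{key}"] = metrics.pop(key)
    return metrics

assert prefix_metric_keys({"rouge2": 20.5, "eval_loss": 1.3}, "eval") == {
    "eval_rouge2": 20.5,
    "eval_loss": 1.3,
}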
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
_UpperCamelCase : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
_UpperCamelCase : Tuple = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class a ( snake_case__, unittest.TestCase ):
UpperCAmelCase_ : int =CamembertTokenizer
UpperCAmelCase_ : Dict =CamembertTokenizerFast
UpperCAmelCase_ : Tuple =True
UpperCAmelCase_ : List[Any] =True
def UpperCamelCase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = CamembertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ):
lowercase = '<pad>'
lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self ):
lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_A ) , 1_0_0_4 )
def UpperCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def UpperCamelCase_ ( self ):
lowercase = CamembertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
lowercase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowercase = 'I was born in 92000, and this is falsé.'
lowercase = tokenizer.encode(_A )
lowercase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
lowercase = tokenizer.encode(_A , add_special_tokens=_A )
lowercase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
# <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
lowercase = tokenizer.convert_ids_to_tokens(_A )
lowercase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self ):
if not self.test_rust_tokenizer:
return
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = 'I was born in 92000, and this is falsé.'
lowercase = tokenizer.tokenize(_A )
lowercase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
lowercase = tokenizer.encode(_A , add_special_tokens=_A )
lowercase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
lowercase = self.get_rust_tokenizer()
lowercase = tokenizer.encode(_A )
lowercase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
@slow
def UpperCamelCase_ ( self ):
lowercase = {'input_ids': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# CamemBERT is a French model, so we also use French texts.
lowercase = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=_A , )
| 220 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
__lowerCAmelCase = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() )
__lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCamelCase__ = logging.getLogger(__name__)
def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any ):
if metric == "rouge2":
__lowerCAmelCase = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
__lowerCAmelCase = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
__lowerCAmelCase = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
__lowerCAmelCase = ModelCheckpoint(
dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F"""val_{metric}""" , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return EarlyStopping(
monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , )
class a__ ( pl.Callback ):
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A=True ):
"""simple docstring"""
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
__lowerCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
__lowerCAmelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowerCAmelCase = od / "test_results.txt"
__lowerCAmelCase = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowerCAmelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
__lowerCAmelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A , "a+" ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowerCAmelCase = metrics[key]
if isinstance(_A , torch.Tensor ):
__lowerCAmelCase = val.item()
__lowerCAmelCase = f"""{key}: {val:.6f}\n"""
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
__lowerCAmelCase = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(_A )
@rank_zero_only
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
try:
__lowerCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
__lowerCAmelCase = pl_module.model.num_parameters()
__lowerCAmelCase = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_A , _A , "test" )
@rank_zero_only
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 92 | 0 |
def __lowerCAmelCase ( a__ ) -> List[str]:
# surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''Length must be positive.''' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __lowerCAmelCase ( a__ ) -> Optional[Any]:
# volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge^3
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''Length must be positive.''' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
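The two functions directly above implement the closed-form surface area 3*sqrt(25 + 10*sqrt(5)) * a**2 and volume (15 + 7*sqrt(5))/4 * a**3 of a regular dodecahedron with edge length a. A quick numeric spot check against the known unit-edge values (about 20.6457 and 7.6631); the readable function names below are mine:

from math import isclose

def dodecahedron_surface_area(edge):
    # 3 * sqrt(25 + 10*sqrt(5)) * edge^2
    return 3 * ((25 + 10 * (5 ** 0.5)) ** 0.5) * edge ** 2

def dodecahedron_volume(edge):
    # (15 + 7*sqrt(5)) / 4 * edge^3
    return ((15 + 7 * (5 ** 0.5)) / 4) * edge ** 3

assert isclose(dodecahedron_surface_area(1), 20.6457, abs_tol=1e-3)
assert isclose(dodecahedron_volume(1), 7.6631, abs_tol=1e-3)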
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 92 | 0 |
from __future__ import annotations
from collections.abc import Callable
A__ : Dict = list[list[float | int]]
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = len(SCREAMING_SNAKE_CASE_ )
lowercase__ = [[0 for _ in range(size + 1 )] for _ in range(SCREAMING_SNAKE_CASE_ )]
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
for row in range(SCREAMING_SNAKE_CASE_ ):
for col in range(SCREAMING_SNAKE_CASE_ ):
lowercase__ = matrix[row][col]
lowercase__ = vector[row][0]
lowercase__ = 0
lowercase__ = 0
while row < size and col < size:
# pivoting
lowercase__ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowercase__ , lowercase__ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , SCREAMING_SNAKE_CASE_ ):
lowercase__ = augmented[rowa][col] / augmented[row][col]
lowercase__ = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , SCREAMING_SNAKE_CASE_ ):
for row in range(SCREAMING_SNAKE_CASE_ ):
lowercase__ = augmented[row][col] / augmented[col][col]
for cola in range(SCREAMING_SNAKE_CASE_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(SCREAMING_SNAKE_CASE_ )
]
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = len(SCREAMING_SNAKE_CASE_ )
lowercase__ = [[0 for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )]
lowercase__ = [[0] for _ in range(SCREAMING_SNAKE_CASE_ )]
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
for x_val, y_val in enumerate(SCREAMING_SNAKE_CASE_ ):
for col in range(SCREAMING_SNAKE_CASE_ ):
lowercase__ = (x_val + 1) ** (size - col - 1)
lowercase__ = y_val
lowercase__ = solve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def interpolated_func(lowerCamelCase_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(SCREAMING_SNAKE_CASE_ ) )
return interpolated_func
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def a ( lowerCamelCase_ = question_function , lowerCamelCase_ = 10 ):
'''simple docstring'''
lowercase__ = [func(SCREAMING_SNAKE_CASE_ ) for x_val in range(1 , order + 1 )]
lowercase__ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
lowercase__ = 0
lowercase__ = 42
lowercase__ = 42
for poly in polynomials:
lowercase__ = 1
while func(SCREAMING_SNAKE_CASE_ ) == poly(SCREAMING_SNAKE_CASE_ ):
x_val += 1
ret += poly(SCREAMING_SNAKE_CASE_ )
return ret
if __name__ == "__main__":
print(F"{solution() = }")
| 207 |
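The block above is the optimum-polynomial exercise: fit a polynomial through the first k terms of u(n) = 1 - n + n^2 - ... + n^10 by Gaussian elimination, then add up the first value at which each fit diverges from the true sequence. The same idea can be checked on the small cubic example u(n) = n**3 using exact Lagrange interpolation instead of elimination (a different route to the same fitted polynomial; the names below are mine):

from fractions import Fraction

def lagrange_fit(points):
    # exact polynomial through (1, points[0]), ..., (k, points[k-1])
    k = len(points)
    def poly(x):
        total = Fraction(0)
        for i in range(k):
            term = Fraction(points[i])
            for j in range(k):
                if j != i:
                    term *= Fraction(x - (j + 1), (i + 1) - (j + 1))
            total += term
        return total
    return poly

# For u(n) = n**3 the fit through the first two terms (1, 8) is linear and
# first goes wrong at n = 3, predicting 15 while the true value is 27.
u = [n ** 3 for n in range(1, 5)]
op_2 = lagrange_fit(u[:2])
assert op_2(3) == 15 and u[2] == 27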
from queue import PriorityQueue
from typing import Any
import numpy as np
def _a ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : set , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : PriorityQueue , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
__lowerCAmelCase = cst_fwd.get(SCREAMING_SNAKE_CASE_ , np.inf )
__lowerCAmelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
__lowerCAmelCase = new_cost_f
__lowerCAmelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
__lowerCAmelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict ):
__lowerCAmelCase = -1
__lowerCAmelCase = set()
__lowerCAmelCase = set()
__lowerCAmelCase = {source: 0}
__lowerCAmelCase = {destination: 0}
__lowerCAmelCase = {source: None}
__lowerCAmelCase = {destination: None}
__lowerCAmelCase = PriorityQueue()
__lowerCAmelCase = PriorityQueue()
__lowerCAmelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
__lowerCAmelCase , __lowerCAmelCase = queue_forward.get()
visited_forward.add(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase , __lowerCAmelCase = queue_backward.get()
visited_backward.add(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = pass_and_relaxation(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__lowerCAmelCase = pass_and_relaxation(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
__lowerCAmelCase = shortest_distance
return shortest_path_distance
UpperCamelCase__ = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
UpperCamelCase__ = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 | 0 |
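The two dictionaries at the bottom are the forward and backward adjacency lists of the same small graph, and the bidirectional search above relaxes edges from both ends until the frontiers meet. As an independent sanity check, a plain single-direction Dijkstra on the forward graph gives the same shortest distance from E to F (the graph is copied from graph_fwd above; the helper name is mine):

import heapq

graph = {
    "B": [("C", 1)],
    "C": [("D", 1)],
    "D": [("F", 1)],
    "E": [("B", 1), ("G", 2)],
    "F": [],
    "G": [("F", 1)],
}

def dijkstra(graph, source, destination):
    # textbook Dijkstra, used only to cross-check the bidirectional version
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, v = heapq.heappop(heap)
        if v == destination:
            return d
        if d > dist.get(v, float("inf")):
            continue
        for nxt, w in graph[v]:
            if d + w < dist.get(nxt, float("inf")):
                dist[nxt] = d + w
                heapq.heappush(heap, (d + w, nxt))
    return float("inf")

# E -> G -> F costs 2 + 1 = 3, shorter than E -> B -> C -> D -> F (cost 4)
assert dijkstra(graph, "E", "F") == 3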
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase: Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase: List[Any] = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class a__( snake_case__ ):
lowercase__ = """dpt"""
def __init__( self : Union[str, Any] , __snake_case : Union[str, Any]=7_68 , __snake_case : Optional[int]=12 , __snake_case : int=12 , __snake_case : Dict=30_72 , __snake_case : List[Any]="gelu" , __snake_case : List[str]=0.0 , __snake_case : Any=0.0 , __snake_case : Dict=0.02 , __snake_case : int=1e-1_2 , __snake_case : Any=3_84 , __snake_case : Optional[Any]=16 , __snake_case : Union[str, Any]=3 , __snake_case : Optional[int]=False , __snake_case : List[Any]=True , __snake_case : Any=[2, 5, 8, 11] , __snake_case : Optional[int]="project" , __snake_case : List[Any]=[4, 2, 1, 0.5] , __snake_case : Any=[96, 1_92, 3_84, 7_68] , __snake_case : Tuple=2_56 , __snake_case : Optional[Any]=-1 , __snake_case : Tuple=False , __snake_case : List[str]=True , __snake_case : Optional[Any]=0.4 , __snake_case : str=2_55 , __snake_case : int=0.1 , __snake_case : Any=[1, 10_24, 24, 24] , __snake_case : str=[0, 1] , __snake_case : Union[str, Any]=None , **__snake_case : List[Any] , ):
super().__init__(**_A )
a : Tuple = hidden_size
a : Any = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
a : str = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
a : Tuple = BitConfig(**_A )
elif isinstance(_A , _A ):
logger.info('Initializing the config with a `BiT` backbone.' )
a : Tuple = BitConfig(**_A )
elif isinstance(_A , _A ):
a : Optional[int] = backbone_config
else:
raise ValueError(
F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
a : Optional[Any] = backbone_featmap_shape
a : List[str] = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
a : str = None
a : str = None
a : Tuple = []
a : Tuple = num_hidden_layers
a : List[Any] = num_attention_heads
a : str = intermediate_size
a : Optional[Any] = hidden_act
a : Optional[Any] = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : List[str] = initializer_range
a : List[Any] = layer_norm_eps
a : Tuple = image_size
a : int = patch_size
a : Union[str, Any] = num_channels
a : List[Any] = qkv_bias
a : Optional[Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
a : int = readout_type
a : List[str] = reassemble_factors
a : Optional[int] = neck_hidden_sizes
a : Any = fusion_hidden_size
a : str = head_in_index
a : List[Any] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
a : Union[str, Any] = use_auxiliary_head
a : Tuple = auxiliary_loss_weight
a : Union[str, Any] = semantic_loss_ignore_index
a : str = semantic_classifier_dropout
def lowercase_ ( self : int ):
a : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
a : Union[str, Any] = self.backbone_config.to_dict()
a : Optional[Any] = self.__class__.model_type
return output | 297 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class a__ ( snake_case__ ):
_a : Optional[int] = """decision_transformer"""
_a : Optional[int] = ["""past_key_values"""]
_a : Dict = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , _A=1_7 , _A=4 , _A=1_2_8 , _A=4_0_9_6 , _A=True , _A=1 , _A=1_0_2_4 , _A=3 , _A=1 , _A=None , _A="relu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=1E-5 , _A=0.02 , _A=True , _A=True , _A=5_0_2_5_6 , _A=5_0_2_5_6 , _A=False , _A=False , **_A , ):
"""simple docstring"""
__lowerCAmelCase = state_dim
__lowerCAmelCase = act_dim
__lowerCAmelCase = hidden_size
__lowerCAmelCase = max_ep_len
__lowerCAmelCase = action_tanh
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scale_attn_weights
__lowerCAmelCase = use_cache
__lowerCAmelCase = scale_attn_by_inverse_layer_idx
__lowerCAmelCase = reorder_and_upcast_attn
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
| 92 | 0 |
UpperCAmelCase : List[Any] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
UpperCAmelCase : Optional[int] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 252 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_a : str = StableUnCLIPPipeline
_a : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_a : Optional[Any] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 3_2
__lowerCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
__lowerCAmelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
__lowerCAmelCase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL()
__lowerCAmelCase = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase = pipe("anime turle" , generator=_A , output_type="np" )
__lowerCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 92 | 0 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def __snake_case( _lowerCAmelCase ) -> Tuple:
if num <= 0:
raise ValueError("""math domain error""" )
return quad(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , args=(SCREAMING_SNAKE_CASE_) )[0]
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return math.pow(SCREAMING_SNAKE_CASE_ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 35 |
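The snippet above evaluates the gamma function as the Euler integral Gamma(z) = integral from 0 to infinity of x**(z-1) * e**(-x) dx using scipy.integrate.quad. A de-obfuscated sketch of the same computation, plus a spot check against the factorial identity Gamma(n) = (n-1)! (requires scipy; the readable names are mine):

import math
from numpy import inf
from scipy.integrate import quad

def integrand(x, z):
    # x**(z-1) * e**(-x), the Euler integral of the second kind
    return math.pow(x, z - 1) * math.exp(-x)

def gamma(num):
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]

# Gamma(n) == (n-1)! for positive integers, e.g. Gamma(5) == 24
assert math.isclose(gamma(5), 24.0, rel_tol=1e-6)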
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class UpperCamelCase__ :
def __init__( self :str , _A :int , _A :Optional[int]=13 , _A :Optional[Any]=7 , _A :List[str]=True , _A :Dict=True , _A :Dict=True , _A :int=True , _A :List[str]=99 , _A :int=32 , _A :Union[str, Any]=2 , _A :Optional[Any]=4 , _A :List[Any]=37 , _A :Union[str, Any]="gelu" , _A :Dict=0.1 , _A :int=0.1 , _A :Union[str, Any]=512 , _A :Dict=16 , _A :Optional[Any]=2 , _A :str=0.02 , _A :Dict=3 , _A :Tuple=4 , _A :Dict=None , _A :Union[str, Any]=1_000 , ) -> List[str]:
'''simple docstring'''
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
__A = range_bbox
def lowercase_ ( self :Tuple ) -> List[str]:
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__A = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__A = bbox[i, j, 3]
__A = bbox[i, j, 1]
__A = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__A = bbox[i, j, 2]
__A = bbox[i, j, 0]
__A = t
__A = tf.convert_to_tensor(_A )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A = ids_tensor([self.batch_size] , self.num_choices )
__A = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self :Union[str, Any] , _A :List[str] , _A :Any , _A :List[Any] , _A :Any , _A :Any , _A :Union[str, Any] , _A :Union[str, Any] , _A :Optional[Any] ) -> List[str]:
'''simple docstring'''
__A = TFLayoutLMModel(config=_A )
__A = model(_A , _A , attention_mask=_A , token_type_ids=_A )
__A = model(_A , _A , token_type_ids=_A )
__A = model(_A , _A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase_ ( self :List[str] , _A :Optional[int] , _A :List[Any] , _A :int , _A :Dict , _A :Tuple , _A :Optional[int] , _A :Union[str, Any] , _A :List[Any] ) -> Optional[Any]:
'''simple docstring'''
__A = TFLayoutLMForMaskedLM(config=_A )
__A = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self :List[str] , _A :List[Any] , _A :Tuple , _A :Dict , _A :Union[str, Any] , _A :Tuple , _A :Optional[Any] , _A :Optional[int] , _A :Any ) -> str:
'''simple docstring'''
__A = self.num_labels
__A = TFLayoutLMForSequenceClassification(config=_A )
__A = model(_A , _A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self :Dict , _A :List[Any] , _A :Any , _A :List[str] , _A :List[Any] , _A :int , _A :List[Any] , _A :Optional[Any] , _A :Dict ) -> Any:
'''simple docstring'''
__A = self.num_labels
__A = TFLayoutLMForTokenClassification(config=_A )
__A = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self :Tuple , _A :Optional[int] , _A :Optional[int] , _A :Optional[Any] , _A :str , _A :Dict , _A :int , _A :str , _A :int ) -> Dict:
'''simple docstring'''
__A = TFLayoutLMForQuestionAnswering(config=_A )
__A = model(_A , _A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self :str ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( snake_case__ , snake_case__ , unittest.TestCase):
UpperCAmelCase__ : List[Any] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : List[str] = 1_0
def lowercase_ ( self :Any ) -> Tuple:
'''simple docstring'''
__A = TFLayoutLMModelTester(self )
__A = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self :Dict ) -> int:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def lowercase_ ( self :List[Any] ) -> List[str]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def lowercase_ ( self :Any ) -> Tuple:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def lowercase_ ( self :Dict ) -> List[Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@slow
def lowercase_ ( self :List[str] ) -> str:
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = TFLayoutLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def lowercase_ ( self :Tuple ) -> List[str]:
'''simple docstring'''
pass
def prepare_layoutlm_batch_inputs():
"""simple docstring"""
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
__A = tf.convert_to_tensor([[1_0_1,1_0_1_9,1_0_1_4,1_0_1_6,1_0_3_7,1_2_8_4_9,4_7_4_7,1_0_0_4,1_4_2_4_6,2_2_7_8,5_4_3_9,4_5_2_4,5_0_0_2,2_9_3_0,2_1_9_3,2_9_3_0,4_3_4_1,3_2_0_8,1_0_0_5,1_0_5_5,2_1_7_1,2_8_4_8,1_1_3_0_0,3_5_3_1,1_0_2],[1_0_1,4_0_7_0,4_0_3_4,7_0_2_0,1_0_2_4,3_0_5_8,1_0_1_5,1_0_1_3,2_8_6_1,1_0_1_3,6_0_7_0,1_9_2_7_4,2_7_7_2,6_2_0_5,2_7_8_1_4,1_6_1_4_7,1_6_1_4_7,4_3_4_3,2_0_4_7,1_0_2_8_3,1_0_9_6_9,1_4_3_8_9,1_0_1_2,2_3_3_8,1_0_2]] ) # noqa: E231
__A = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__A = tf.convert_to_tensor([[[0,0,0,0],[4_2_3,2_3_7,4_4_0,2_5_1],[4_2_7,2_7_2,4_4_1,2_8_7],[4_1_9,1_1_5,4_3_7,1_2_9],[9_6_1,8_8_5,9_9_2,9_1_2],[2_5_6,3_8,3_3_0,5_8],[2_5_6,3_8,3_3_0,5_8],[3_3_6,4_2,3_5_3,5_7],[3_6_0,3_9,4_0_1,5_6],[3_6_0,3_9,4_0_1,5_6],[4_1_1,3_9,4_7_1,5_9],[4_7_9,4_1,5_2_8,5_9],[5_3_3,3_9,6_3_0,6_0],[6_7,1_1_3,1_3_4,1_3_1],[1_4_1,1_1_5,2_0_9,1_3_2],[6_8,1_4_9,1_3_3,1_6_6],[1_4_1,1_4_9,1_8_7,1_6_4],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[2_9_5,1_4_8,3_4_9,1_6_5],[4_4_1,1_4_9,4_9_2,1_6_6],[4_9_7,1_4_9,5_4_6,1_6_4],[6_4,2_0_1,1_2_5,2_1_8],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]],[[0,0,0,0],[6_6_2,1_5_0,7_5_4,1_6_6],[6_6_5,1_9_9,7_4_2,2_1_1],[5_1_9,2_1_3,5_5_4,2_2_8],[5_1_9,2_1_3,5_5_4,2_2_8],[1_3_4,4_3_3,1_8_7,4_5_4],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[3_1_4,4_6_9,3_7_6,4_8_2],[5_0_4,6_8_4,5_8_2,7_0_6],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[6_1_0,7_4_9,6_5_2,7_6_5],[1_3_0,6_5_9,1_6_8,6_7_2],[1_7_6,6_5_7,2_3_7,6_7_2],[2_3_8,6_5_7,3_1_2,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[7_1_6,3_0_1,8_2_5,3_1_7],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]]] ) # noqa: E231
__A = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__A = tf.convert_to_tensor([[-1_0_0,1_0,1_0,1_0,9,1,-1_0_0,7,7,-1_0_0,7,7,4,2,5,2,8,8,-1_0_0,-1_0_0,5,0,3,2,-1_0_0],[-1_0_0,1_2,1_2,1_2,-1_0_0,1_2,1_0,-1_0_0,-1_0_0,-1_0_0,-1_0_0,1_0,1_2,9,-1_0_0,-1_0_0,-1_0_0,1_0,1_0,1_0,9,1_2,-1_0_0,1_0,-1_0_0]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class UpperCamelCase__ ( unittest.TestCase):
@slow
def lowercase_ ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
__A = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
__A , __A , __A , __A , __A = prepare_layoutlm_batch_inputs()
# forward pass
__A = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A )
# test the sequence output on [0, :3, :3]
__A = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1E-3 ) )
# test the pooled output on [1, :3]
__A = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _A , atol=1E-3 ) )
@slow
def lowercase_ ( self :Tuple ) -> Any:
'''simple docstring'''
__A = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
__A , __A , __A , __A , __A = prepare_layoutlm_batch_inputs()
# forward pass
__A = model(
input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__A = outputs.loss
__A = (2,)
self.assertEqual(loss.shape , _A )
# test the shape of the logits
__A = outputs.logits
__A = (2, 2)
self.assertEqual(logits.shape , _A )
@slow
def lowercase_ ( self :Any ) -> int:
'''simple docstring'''
__A = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
__A , __A , __A , __A , __A = prepare_layoutlm_batch_inputs()
# forward pass
__A = model(
input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=_A )
# test the shape of the logits
__A = outputs.logits
__A = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _A )
@slow
def lowercase_ ( self :List[Any] ) -> List[str]:
'''simple docstring'''
__A = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
__A , __A , __A , __A , __A = prepare_layoutlm_batch_inputs()
# forward pass
__A = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A )
# test the shape of the logits
__A = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _A )
self.assertEqual(outputs.end_logits.shape , _A )
| 161 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case__ , unittest.TestCase ):
_a : Optional[Any] = DebertaVaTokenizer
_a : Optional[Any] = DebertaVaTokenizerFast
_a : List[str] = True
_a : Optional[Any] = True
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(_A , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = "this is a test"
__lowerCAmelCase = "this is a test"
return input_text, output_text
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(_A ) , 3_0_0_0_1 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "This is a test"
__lowerCAmelCase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
__lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = DebertaVaTokenizer(_A , keep_accents=_A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
__lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DebertaVaTokenizer(_A )
__lowerCAmelCase = tokenizer.encode("sequence builders" )
__lowerCAmelCase = tokenizer.encode("multi-sequence build" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 92 | 0 |
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` that are differences of consecutive cubes.

    (n + 1) ** 3 - n ** 3 = 3 * n ** 2 + 3 * n + 1, so successive candidates differ by 6 * n.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 153 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCamelCase__ = logging.get_logger(__name__)
@dataclass
class a__ ( snake_case__ ):
_a : List[str] = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
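    # Legacy "no_*" flags accepted for backwards compatibility; __init__ below pops each one,
    # warns, and derives the positive setting from it.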
def __init__( self , **_A ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__lowerCAmelCase = deprecated_arg[3:]
__lowerCAmelCase = not kwargs.pop(_A )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
__lowerCAmelCase = kwargs.pop("tpu_name" , self.tpu_name )
__lowerCAmelCase = kwargs.pop("device_idx" , self.device_idx )
__lowerCAmelCase = kwargs.pop("eager_mode" , self.eager_mode )
__lowerCAmelCase = kwargs.pop("use_xla" , self.use_xla )
super().__init__(**_A )
_a : str = field(
default=snake_case__ , metadata={"""help""": """Name of TPU"""} , )
_a : int = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
_a : bool = field(default=snake_case__ , metadata={"""help""": """Benchmark models in eager model."""} )
_a : bool = field(
default=snake_case__ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
__lowerCAmelCase = None
if self.tpu:
try:
if self.tpu_name:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__lowerCAmelCase = None
return tpu
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
__lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
__lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.n_gpu > 0
| 92 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( snake_case__ ):
__UpperCAmelCase : str = """xlm-roberta"""
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
snake_case : List[Any] = vocab_size
snake_case : Union[str, Any] = hidden_size
snake_case : Any = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : List[Any] = hidden_act
snake_case : List[str] = intermediate_size
snake_case : Any = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : str = max_position_embeddings
snake_case : str = type_vocab_size
snake_case : Tuple = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Dict = position_embedding_type
snake_case : Union[str, Any] = use_cache
snake_case : Union[str, Any] = classifier_dropout
class _lowerCAmelCase ( snake_case__ ):
@property
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
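        # For ONNX export, batch and sequence dimensions (plus the choice dimension for multiple-choice heads)
        # are declared as dynamic axes.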
if self.task == "multiple-choice":
snake_case : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 203 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
UpperCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
UpperCamelCase__ = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case__ , unittest.TestCase ):
_a : int = CamembertTokenizer
_a : Dict = CamembertTokenizerFast
_a : Tuple = True
_a : List[Any] = True
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = CamembertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(_A ) , 1_0_0_4 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = CamembertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
__lowerCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.tokenize(_A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = {"input_ids": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__lowerCAmelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=_A , )
| 92 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a Flax parameter key and adjust its tensor layout to the PyTorch convention."""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
snake_case_ = flax_key_tuple[:-1] + ('''weight''',)
snake_case_ = torch.permute(SCREAMING_SNAKE_CASE_, (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE_ ):
# linear layer
snake_case_ = flax_key_tuple[:-1] + ('''weight''',)
snake_case_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case_ = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into its real layer name, trailing path and resolved content."""
if "metadata" in layer:
snake_case_ = layer.split('''metadata''' )
snake_case_ = ''''''.join(split_layer[0] )[:-1]
snake_case_ = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
snake_case_ = layer.split('''kvstore''' )
snake_case_ = ''''''.join(split_layer[0] )[:-1]
snake_case_ = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
snake_case_ = layer.split('''/''' )
snake_case_ = '''/'''.join(split_layer[:-1] )
snake_case_ = (split_layer[-1],)
if "kvstore/path" in layer:
snake_case_ = F"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
snake_case_ = '''file'''
else:
snake_case_ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Rename the keys of one shard with `rename_keys` and save it with torch.save."""
snake_case_ = rename_keys(SCREAMING_SNAKE_CASE_ )
snake_case_ = {}
for k, v in current_block.items():
snake_case_ = v
snake_case_ = new_current_block
torch.save(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Stream the T5X checkpoint layer by layer and write PyTorch shards no larger than `max_shard_size`."""
snake_case_ = convert_file_size_to_int(SCREAMING_SNAKE_CASE_ )
snake_case_ = []
snake_case_ = {}
snake_case_ = 0
snake_case_ = 0
os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_ )
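    # Read the raw T5X checkpoint and flatten the optimizer target so every leaf is addressed by a '/'-joined key.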
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''', '''rb''' ) as fp:
snake_case_ = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
snake_case_ = flatten_dict(SCREAMING_SNAKE_CASE_, sep='''/''' )
snake_case_ = {}
for layer in checkpoint_info.keys():
snake_case_ ,snake_case_ ,snake_case_ = get_key_and_tensorstore_dict(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if curr_real_layer_name in all_layers:
snake_case_ = content
else:
snake_case_ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
snake_case_ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE_ )
snake_case_ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
snake_case_ ,snake_case_ = rename_base_flax_keys(tuple(key.split('''/''' ) ), SCREAMING_SNAKE_CASE_ )
snake_case_ = '''/'''.join(SCREAMING_SNAKE_CASE_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
snake_case_ = os.path.join(
SCREAMING_SNAKE_CASE_, weights_name.replace('''.bin''', F"-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin" ) )
rename_and_save_block(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
snake_case_ = {}
snake_case_ = 0
snake_case_ = raw_weights.to(getattr(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE_, weights_name.replace('''.bin''', F"-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin" ) )
rename_and_save_block(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(SCREAMING_SNAKE_CASE_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
snake_case_ = {}
snake_case_ = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE_ ):
snake_case_ = weights_name.replace(
'''.bin''', F"-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE_ ):05d}.bin" ) # len(sharded_state_dicts):05d}
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE_, weights_name.replace('''.bin''', F"-{idx+1:05d}-of-???.bin" ) )
os.rename(SCREAMING_SNAKE_CASE_, os.path.join(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
snake_case_ = shard
for key in shard:
snake_case_ = shard_file
# Add the metadata
snake_case_ = {'''total_size''': total_size}
snake_case_ = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ), '''w''', encoding='''utf-8''' ) as f:
snake_case_ = json.dumps(SCREAMING_SNAKE_CASE_, indent=2, sort_keys=SCREAMING_SNAKE_CASE_ ) + '''\n'''
f.write(SCREAMING_SNAKE_CASE_ )
return metadata, index
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
a : str = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __magic_name__ ( ) -> str:
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
snake_case_ = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
snake_case_ = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''', device_map='''auto''' )
snake_case_ = TaTokenizer.from_pretrained('''t5-small''' )
snake_case_ = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
snake_case_ = tokenizer(SCREAMING_SNAKE_CASE_, return_tensors='''pt''' ).input_ids
snake_case_ = model.generate(SCREAMING_SNAKE_CASE_, decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 56 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    # Pass iterables (e.g. an existing (height, width) pair) through unchanged; duplicate scalars into a 2-tuple.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class a__ :
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__lowerCAmelCase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A )
__lowerCAmelCase = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__lowerCAmelCase = after_output[0].numpy()
__lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase = to_atuple(vision_model.config.image_size )
__lowerCAmelCase = to_atuple(vision_model.config.patch_size )
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_pretrained_model_and_inputs()
__lowerCAmelCase = model_a(**_A )
__lowerCAmelCase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_A )
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_A )
__lowerCAmelCase = model_a(**_A )
__lowerCAmelCase = after_outputs[0].numpy()
__lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
@require_tf
class a__ ( snake_case__ , unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
__lowerCAmelCase = 1_3
__lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCAmelCase = random_attention_mask([batch_size, 4] )
__lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFViTModel(_A , name="vision_model" )
__lowerCAmelCase = TFBertModel(_A , name="text_model" )
return vision_model, text_model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class a__ ( snake_case__ , unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
__lowerCAmelCase = 1_3
__lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCAmelCase = random_attention_mask([batch_size, 4] )
__lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , _A=None , **_A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.get_vision_text_model(_A , _A )
__lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__lowerCAmelCase = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowerCAmelCase = to_atuple(vision_model.config.image_size )
__lowerCAmelCase = to_atuple(vision_model.config.patch_size )
__lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTModel(_A , name="vision_model" )
__lowerCAmelCase = TFRobertaModel(_A , name="text_model" )
return vision_model, text_model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class a__ ( snake_case__ , unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
__lowerCAmelCase = 1_3
__lowerCAmelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowerCAmelCase = random_attention_mask([batch_size, 4] )
__lowerCAmelCase = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFCLIPVisionModel(_A , name="vision_model" )
__lowerCAmelCase = TFBertModel(_A , name="text_model" )
return vision_model, text_model
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_A )
__lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__lowerCAmelCase = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_A , padding=_A , return_tensors="np" )
__lowerCAmelCase = model(**_A )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCAmelCase = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) )
| 92 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
a_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(snake_case__ )
class __UpperCamelCase ( snake_case__ ):
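    # Depth-estimation pipeline: preprocessing loads the image, the model predicts a depth map, and
    # post-processing resizes that map back to the original image size and also returns it as an 8-bit PIL image.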
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
super().__init__(*_A, **_A )
requires_backends(self, '''vision''' )
self.check_model_type(_A )
def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return super().__call__(_A, **_A )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
return {}, {}, {}
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =load_image(_A )
lowerCamelCase_ =image.size
lowerCamelCase_ =self.image_processor(images=_A, return_tensors=self.framework )
return model_inputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.model(**_A )
return model_outputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model_outputs.predicted_depth
lowerCamelCase_ =torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ), size=self.image_size[::-1], mode='''bicubic''', align_corners=_A )
lowerCamelCase_ =prediction.squeeze().cpu().numpy()
lowerCamelCase_ =(output * 255 / np.max(_A )).astype('''uint8''' )
lowerCamelCase_ =Image.fromarray(_A )
lowerCamelCase_ ={}
lowerCamelCase_ =predicted_depth
lowerCamelCase_ =depth
return output_dict
| 75 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
if hor == 1_28:
__lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
__lowerCAmelCase = (32, 1_28, 2_56)
__lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
__lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
__lowerCAmelCase = (32, 64, 1_28, 2_56)
__lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
__lowerCAmelCase = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
__lowerCAmelCase = model.state_dict()
__lowerCAmelCase = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_55_36,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
hf_value_function = UNet1DModel(**config )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
state_dict[v] = state_dict.pop(k )
hf_value_function.load_state_dict(state_dict )
torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , "w" ) as f:
json.dump(config , f )
def value_function():
config = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 1_28, 2_56),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_55_36,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
state_dict = model
hf_value_function = UNet1DModel(**config )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
state_dict[v] = state_dict.pop(k )
hf_value_function.load_state_dict(state_dict )
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
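# --- Added round-trip sketch (illustrative, assumes the conversion above has run and the
# local output directories exist): the saved config.json + diffusion_pytorch_model.bin pair
# can be reloaded with diffusers' standard from_pretrained machinery.
#
# from diffusers import UNet1DModel
#
# reloaded = UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")
# print(sum(p.numel() for p in reloaded.parameters()), "parameters reloaded")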
| 92 | 0 |
'''simple docstring'''
import torch
def main():
"""simple docstring"""
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
else:
num_gpus = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 93 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : int = logging.get_logger(__name__)
_lowercase : List[Any] = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase_ = '''nat'''
lowerCAmelCase_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=[3, 4, 6, 5] , __SCREAMING_SNAKE_CASE=[2, 4, 8, 16] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = patch_size
lowercase_ : List[Any] = num_channels
lowercase_ : str = embed_dim
lowercase_ : List[str] = depths
lowercase_ : str = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = num_heads
lowercase_ : int = kernel_size
lowercase_ : Union[str, Any] = mlp_ratio
lowercase_ : Optional[int] = qkv_bias
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : List[Any] = drop_path_rate
lowercase_ : List[Any] = hidden_act
lowercase_ : int = layer_norm_eps
lowercase_ : int = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase_ : Dict = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
lowercase_ : Tuple = layer_scale_init_value
lowercase_ : Union[str, Any] = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
lowercase_ , lowercase_ : int = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
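# --- Added usage sketch (illustrative, not part of the original file): building a small Nat
# configuration and a randomly initialised model from it. `NatModel` additionally needs the
# optional `natten` package at runtime, so that line is best-effort.
#
# from transformers import NatConfig, NatModel
#
# config = NatConfig(embed_dim=64, depths=[2, 2, 4, 2], num_heads=[2, 4, 8, 16])
# model = NatModel(config)   # random weights; requires `pip install natten`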
| 93 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = AltDiffusionPipeline
lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase_ : Dict = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
lowercase_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowercase_ : Dict = CLIPTextModel(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase_ : Union[str, Any] = 77
lowercase_ : Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
lowercase_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ : List[Any] = self.get_dummy_components()
torch.manual_seed(0 )
lowercase_ : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ : Union[str, Any] = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = text_encoder
lowercase_ : int = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = '''A photo of an astronaut'''
lowercase_ : Optional[int] = alt_pipe(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = output.images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : List[str] = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ : int = self.get_dummy_components()
lowercase_ : Optional[int] = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
lowercase_ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase_ : str = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = text_encoder
lowercase_ : List[str] = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = alt_pipe(**__SCREAMING_SNAKE_CASE )
lowercase_ : Any = output.images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ : Optional[Any] = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = '''A painting of a squirrel eating a burger'''
lowercase_ : int = torch.manual_seed(0 )
lowercase_ : Optional[Any] = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowercase_ : Tuple = output.images
lowercase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase_ : List[str] = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowercase_ : Optional[Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowercase_ : Optional[int] = torch.manual_seed(0 )
lowercase_ : Optional[Any] = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''numpy''' )
lowercase_ : Tuple = output.images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase_ : str = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
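# --- Added end-to-end sketch mirroring the slow tests above (needs a CUDA GPU and downloads
# the BAAI/AltDiffusion weights; prompt and output file name are illustrative):
#
# import torch
# from diffusers import AltDiffusionPipeline
#
# pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", torch_dtype=torch.float16)
# pipe = pipe.to("cuda")
# image = pipe("A painting of a squirrel eating a burger", num_inference_steps=25).images[0]
# image.save("alt_diffusion_squirrel.png")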
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mask2former import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
Mask2FormerForUniversalSegmentation,
Mask2FormerModel,
Mask2FormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 93 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = [10, 20, 30, 40, 50, 60]
lowercase_ : Optional[Any] = [2, 4, 6, 8, 10, 12]
lowercase_ : Union[str, Any] = 1_00
self.assertEqual(kp.calc_profit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 2_10 )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Weight can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Profit can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
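# --- Added sketch of the helper exercised above (my own minimal reading of the tests, not the
# actual `knapsack.greedy_knapsack` module): sort items by profit/weight ratio and fill the
# capacity greedily, taking a fraction of the last item if it does not fit whole. The error
# messages are copied from the assertions above; the exception types are assumptions.
#
# def calc_profit(profit, weight, max_weight):
#     if max_weight <= 0:
#         raise ValueError("max_weight must greater than zero.")
#     if any(w < 0 for w in weight):
#         raise ValueError("Weight can not be negative.")
#     if any(p < 0 for p in profit):
#         raise ValueError("Profit can not be negative.")
#     if len(profit) != len(weight):
#         raise ValueError("The length of profit and weight must be same.")
#     items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
#     total, capacity = 0.0, max_weight
#     for p, w in items:
#         if w <= capacity:
#             total, capacity = total + p, capacity - w
#         else:
#             total += p * capacity / w
#             break
#     return total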
| 93 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = [10, 20, 30, 40, 50, 60]
lowercase_ : Optional[Any] = [2, 4, 6, 8, 10, 12]
lowercase_ : Union[str, Any] = 1_00
self.assertEqual(kp.calc_profit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 2_10 )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Weight can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Profit can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 93 | 1 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : list[list] ):
"""simple docstring"""
lowercase_ : Union[str, Any] = current_set.copy()
for row_index, row in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : List[Any] = row[0]
for column_index, column in enumerate(__SCREAMING_SNAKE_CASE ):
if magnitude == 0:
lowercase_ : Dict = column
continue
lowercase_ : Any = column / magnitude
# Subtract to cancel term
lowercase_ : Union[str, Any] = current_set[0]
lowercase_ : Dict = [first_row]
lowercase_ : Optional[Any] = current_set[1::]
for row in current_set:
lowercase_ : Optional[Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(__SCREAMING_SNAKE_CASE )
continue
for column_index in range(len(__SCREAMING_SNAKE_CASE ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(__SCREAMING_SNAKE_CASE )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase_ : List[Any] = final_set[0]
lowercase_ : Any = []
lowercase_ : List[str] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase_ : Optional[Any] = simplify(__SCREAMING_SNAKE_CASE )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , __SCREAMING_SNAKE_CASE )
lowercase_ : int = resultant
return final_set
def snake_case_ ( __SCREAMING_SNAKE_CASE : list[list] ):
"""simple docstring"""
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
lowercase_ : Dict = len(__SCREAMING_SNAKE_CASE ) + 1
if any(len(__SCREAMING_SNAKE_CASE ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(__SCREAMING_SNAKE_CASE ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase_ : Optional[int] = equations.copy()
if any(0 in row for row in data_set ):
lowercase_ : Tuple = data_set.copy()
lowercase_ : Any = []
for row_index, row in enumerate(__SCREAMING_SNAKE_CASE ):
if 0 not in row:
lowercase_ : Optional[Any] = data_set.pop(__SCREAMING_SNAKE_CASE )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , __SCREAMING_SNAKE_CASE )
lowercase_ : int = data_set.copy()
lowercase_ : List[str] = simplify(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = simplified[::-1]
lowercase_ : list = []
for row in simplified:
lowercase_ : Dict = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase_ : Optional[int] = row.copy()[: len(__SCREAMING_SNAKE_CASE ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(__SCREAMING_SNAKE_CASE ) == 0:
solutions.append(0 )
continue
lowercase_ : List[str] = temp_row[1::]
lowercase_ : Tuple = temp_row[::-1]
for column_index, column in enumerate(__SCREAMING_SNAKE_CASE ):
current_solution -= column * solutions[column_index]
solutions.append(__SCREAMING_SNAKE_CASE )
lowercase_ : str = []
for item in solutions:
final.append(float(round(__SCREAMING_SNAKE_CASE , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[Any] = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 93 |
'''simple docstring'''
import argparse
import copy
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
lowercase_ : List[Any] = {}
with open(__SCREAMING_SNAKE_CASE ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase_ : Union[str, Any] = []
_list.append([line.split()[1], line.split()[2]] )
lowercase_ : str = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase_ : Optional[int] = []
_list.append([line.split()[0], line.split()[2]] )
lowercase_ : Dict = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE ) as f:
lowercase_ : List[str] = f.read(1 )
lowercase_ : Optional[int] = start_node
lowercase_ : Any = []
lowercase_ : List[str] = start_node
lowercase_ : Optional[Any] = 0
while visiting not in first_solution:
lowercase_ : Any = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__SCREAMING_SNAKE_CASE ) and k[0] not in first_solution:
lowercase_ : List[Any] = k[1]
lowercase_ : List[Any] = k[0]
first_solution.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = distance_of_first_solution + int(__SCREAMING_SNAKE_CASE )
lowercase_ : int = best_node
first_solution.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase_ : Optional[Any] = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Tuple = []
for n in solution[1:-1]:
lowercase_ : List[str] = solution.index(__SCREAMING_SNAKE_CASE )
for kn in solution[1:-1]:
lowercase_ : Any = solution.index(__SCREAMING_SNAKE_CASE )
if n == kn:
continue
lowercase_ : Dict = copy.deepcopy(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = kn
lowercase_ : List[Any] = n
lowercase_ : str = 0
for k in _tmp[:-1]:
lowercase_ : Tuple = _tmp[_tmp.index(__SCREAMING_SNAKE_CASE ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase_ : Optional[Any] = distance + int(i[1] )
_tmp.append(__SCREAMING_SNAKE_CASE )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase_ : Union[str, Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __SCREAMING_SNAKE_CASE : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = 1
lowercase_ : List[str] = first_solution
lowercase_ : Dict = []
lowercase_ : List[str] = distance_of_first_solution
lowercase_ : Optional[Any] = solution
while count <= iters:
lowercase_ : int = find_neighborhood(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = 0
lowercase_ : Dict = neighborhood[index_of_best_solution]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) - 1
lowercase_ : Tuple = False
while not found:
lowercase_ : Optional[int] = 0
while i < len(__SCREAMING_SNAKE_CASE ):
if best_solution[i] != solution[i]:
lowercase_ : Tuple = best_solution[i]
lowercase_ : Optional[int] = solution[i]
break
lowercase_ : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase_ : Tuple = True
lowercase_ : Optional[int] = best_solution[:-1]
lowercase_ : Optional[Any] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase_ : Optional[Any] = cost
lowercase_ : int = solution
else:
lowercase_ : Any = index_of_best_solution + 1
lowercase_ : Any = neighborhood[index_of_best_solution]
if len(__SCREAMING_SNAKE_CASE ) >= size:
tabu_list.pop(0 )
lowercase_ : List[Any] = count + 1
return best_solution_ever, best_cost
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str]=None ):
"""simple docstring"""
lowercase_ : Any = generate_neighbours(args.File )
lowercase_ , lowercase_ : Union[str, Any] = generate_first_solution(
args.File , __SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Optional[int] = tabu_search(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
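# --- Added note on the expected input format (inferred from generate_neighbours() and
# generate_first_solution() above rather than from separate documentation): every line of the
# -f file describes one edge as "<node> <node> <distance>", and the first character of the
# file is used as the start node. A toy data file and an illustrative invocation:
#
#   a b 20
#   a c 18
#   b c 10
#   a d 22
#   b d 6
#   c d 25
#
# python tabu_search.py -f tabudata.txt -i 4 -s 3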
| 93 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
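# --- Added usage sketch for the classes re-exported above (CLIP-style zero-shot scoring with
# the public "nvidia/groupvit-gcc-yfcc" checkpoint, used here purely as an example):
#
# import requests
# from PIL import Image
# from transformers import AutoProcessor, GroupViTModel
#
# model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
# processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
# url = "http://images.cocodataset.org/val2017/000000039769.jpg"
# image = Image.open(requests.get(url, stream=True).raw)
# inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
# probs = model(**inputs).logits_per_image.softmax(dim=1)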
| 93 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def _snake_case ( self ):
"""simple docstring"""
model = AutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' , return_dict=True ).to(torch_device )
tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
input_ids = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
labels = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 93 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowercase : Dict = logging.get_logger(__name__)
_lowercase : List[Any] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''longformer'''
def __init__( self , __SCREAMING_SNAKE_CASE = 5_12 , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 3_05_22 , __SCREAMING_SNAKE_CASE = 7_68 , __SCREAMING_SNAKE_CASE = 12 , __SCREAMING_SNAKE_CASE = 12 , __SCREAMING_SNAKE_CASE = 30_72 , __SCREAMING_SNAKE_CASE = "gelu" , __SCREAMING_SNAKE_CASE = 0.1 , __SCREAMING_SNAKE_CASE = 0.1 , __SCREAMING_SNAKE_CASE = 5_12 , __SCREAMING_SNAKE_CASE = 2 , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = 1E-1_2 , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = attention_window
lowercase_ : Any = sep_token_id
lowercase_ : int = bos_token_id
lowercase_ : Any = eos_token_id
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : Any = hidden_size
lowercase_ : Optional[Any] = num_hidden_layers
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : Optional[int] = hidden_act
lowercase_ : int = intermediate_size
lowercase_ : str = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : str = type_vocab_size
lowercase_ : Tuple = initializer_range
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : Union[str, Any] = onnx_export
class lowerCAmelCase__ ( lowerCamelCase_ ):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "default" , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = True
@property
def _snake_case ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase_ : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase_ : int = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = super().outputs
if self.task == "default":
lowercase_ : Tuple = {0: '''batch'''}
return outputs
@property
def _snake_case ( self ):
"""simple docstring"""
return 1E-4
@property
def _snake_case ( self ):
"""simple docstring"""
return max(super().default_onnx_opset , 14 )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = -1 , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
lowercase_ : int = super().generate_dummy_inputs(
preprocessor=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowercase_ : Union[str, Any] = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
lowercase_ : Optional[Any] = 1
return inputs
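# --- Added usage sketch (illustrative values, not part of the original file): instantiating
# the configuration defined above with a custom attention window and building a randomly
# initialised model from it.
#
# from transformers import LongformerConfig, LongformerModel
#
# config = LongformerConfig(attention_window=256, max_position_embeddings=4098)
# model = LongformerModel(config)   # random weights, no pretrained download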
| 93 |
'''simple docstring'''
def naive_pattern_search( s : str , pattern : str ):
"""simple docstring"""
pat_len = len(pattern )
position = []
for i in range(len(s ) - pat_len + 1 ):
match_found = True
for j in range(pat_len ):
if s[i + j] != pattern[j]:
match_found = False
break
if match_found:
position.append(i )
return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 93 | 1 |
'''simple docstring'''
from math import ceil
def assert_device_map( device_map , num_blocks ):
"""simple docstring"""
blocks = list(range(0 , num_blocks ) )
device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
duplicate_blocks = []
for i in device_map_blocks:
if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(i )
# Missing blocks
missing_blocks = [i for i in blocks if i not in device_map_blocks]
extra_blocks = [i for i in device_map_blocks if i not in blocks]
if len(duplicate_blocks ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(duplicate_blocks ) )
if len(missing_blocks ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(missing_blocks ) )
if len(extra_blocks ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(extra_blocks ) )
def get_device_map( n_layers , devices ):
"""simple docstring"""
layers = list(range(n_layers ) )
n_blocks = int(ceil(n_layers / len(devices ) ) )
layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
return dict(zip(devices , layers_list ) )
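# --- Added usage sketch (illustrative numbers, assumes two visible devices): split 12
# transformer blocks evenly across GPUs 0 and 1 and validate the resulting map.
#
# device_map = get_device_map(n_layers=12, devices=[0, 1])
# # -> {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
# assert_device_map(device_map, num_blocks=12)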
| 93 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_lowercase : Optional[Any] = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
_lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Tuple = '''https://pypi.org/pypi/diffusers/json'''
lowercase_ : Tuple = json.loads(request.urlopen(__SCREAMING_SNAKE_CASE ).read() )['''releases'''].keys()
return sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : version.Version(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( ):
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = Path(__SCREAMING_SNAKE_CASE ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] ):
"""simple docstring"""
init_hf_modules()
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : str = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : int = f.read()
# Imports of the form `import .xxx`
lowercase_ : List[Any] = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : int = False
lowercase_ : Any = [module_file]
lowercase_ : Dict = []
# Let's recurse through all relative imports
while not no_change:
lowercase_ : Dict = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = Path(__SCREAMING_SNAKE_CASE ).parent
lowercase_ : Optional[int] = [str(module_path / m ) for m in new_imports]
lowercase_ : str = [f for f in new_import_files if f not in all_relative_imports]
lowercase_ : int = [F'''{f}.py''' for f in new_import_files]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) == 0
all_relative_imports.extend(__SCREAMING_SNAKE_CASE )
return all_relative_imports
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : Union[str, Any] = f.read()
# Imports of the form `import xxx`
lowercase_ : Any = re.findall('''^\s*import\s+(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Only keep the top-level module
lowercase_ : List[str] = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
lowercase_ : Any = list(set(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[Any] = []
for imp in imports:
try:
importlib.import_module(__SCREAMING_SNAKE_CASE )
except ImportError:
missing_packages.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F'''{', '.join(__SCREAMING_SNAKE_CASE )}. Run `pip install {' '.join(__SCREAMING_SNAKE_CASE )}`''' )
return get_relative_imports(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : List[Any] = module_path.replace(os.path.sep , '''.''' )
lowercase_ : Any = importlib.import_module(__SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(__SCREAMING_SNAKE_CASE )
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
from ..pipelines import DiffusionPipeline
lowercase_ : int = dict(inspect.getmembers(__SCREAMING_SNAKE_CASE , inspect.isclass ) )
lowercase_ : Optional[Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , __SCREAMING_SNAKE_CASE )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
F''' {loaded_module}.''' )
lowercase_ : List[Any] = cls
return pipeline_class
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , ):
"""simple docstring"""
lowercase_ : Dict = str(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = module_file_or_url
lowercase_ : int = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
lowercase_ : Optional[int] = get_diffusers_versions()
# cut ".dev0"
lowercase_ : List[Any] = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
lowercase_ : List[str] = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
lowercase_ : List[str] = F'''v{revision}'''
elif revision == "main":
lowercase_ : Optional[Any] = revision
else:
raise ValueError(
F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
F''' {', '.join(available_versions + ['main'] )}.''' )
# community pipeline on GitHub
lowercase_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=__SCREAMING_SNAKE_CASE , pipeline=__SCREAMING_SNAKE_CASE )
try:
lowercase_ : Optional[Any] = cached_download(
__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Tuple = '''git'''
lowercase_ : Tuple = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
lowercase_ : str = hf_hub_download(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
lowercase_ : Tuple = check_imports(__SCREAMING_SNAKE_CASE )
# Now we move the module inside our cached dynamic modules.
lowercase_ : str = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = Path(__SCREAMING_SNAKE_CASE ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
for module_needed in modules_needed:
lowercase_ : Union[str, Any] = F'''{module_needed}.py'''
shutil.copy(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = use_auth_token
elif use_auth_token is True:
lowercase_ : List[Any] = HfFolder.get_token()
else:
lowercase_ : Optional[Any] = None
lowercase_ : Optional[int] = model_info(__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , token=__SCREAMING_SNAKE_CASE ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowercase_ : int = submodule_path / commit_hash
lowercase_ : Tuple = full_submodule + os.path.sep + commit_hash
create_dynamic_module(__SCREAMING_SNAKE_CASE )
if not (submodule_path / module_file).exists():
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
__SCREAMING_SNAKE_CASE , F'''{module_needed}.py''' , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
"""simple docstring"""
lowercase_ : Optional[Any] = get_cached_module_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return get_class_in_module(__SCREAMING_SNAKE_CASE , final_module.replace('''.py''' , '''''' ) )
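# --- Added usage sketch (illustrative, not part of the original file): the helpers above are
# what back the `custom_pipeline` argument of DiffusionPipeline.from_pretrained, which pulls a
# community pipeline file from the diffusers GitHub repo and loads the class it defines.
#
# from diffusers import DiffusionPipeline
#
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5",
#     custom_pipeline="lpw_stable_diffusion",
# )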
| 93 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowercase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowercase : str = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=8 ):
"""simple docstring"""
lowercase_ : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCAmelCase__ ( lowerCamelCase_ ):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , movq=__SCREAMING_SNAKE_CASE , )
lowercase_ : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if latents is None:
lowercase_ : List[Any] = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase_ : int = latents.to(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = latents * scheduler.init_noise_sigma
return latents
def _snake_case ( self , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase_ : Dict = torch.device(F'''cuda:{gpu_id}''' )
lowercase_ : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowercase_ : Dict = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : List[str] = cpu_offload_with_hook(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prev_module_hook=__SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _snake_case ( self ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__SCREAMING_SNAKE_CASE , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 5_12 , __SCREAMING_SNAKE_CASE = 5_12 , __SCREAMING_SNAKE_CASE = 1_00 , __SCREAMING_SNAKE_CASE = 4.0 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ):
"""simple docstring"""
lowercase_ : Optional[Any] = self._execution_device
lowercase_ : List[str] = guidance_scale > 1.0
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Any = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
lowercase_ : Dict = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowercase_ : int = image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
lowercase_ : List[str] = negative_image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
lowercase_ : int = hint.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
lowercase_ : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : Optional[int] = self.movq.config.latent_channels
lowercase_ , lowercase_ : List[str] = downscale_height_and_width(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
lowercase_ : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
lowercase_ : Dict = self.unet(
sample=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , added_cond_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : List[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Union[str, Any] = variance_pred.chunk(2 )
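# Apply classifier-free guidance to the noise estimate, then re-attach the text-branch variance so schedulers with learned variance still receive it.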
lowercase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : List[str] = self.scheduler.step(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )[0]
# post-processing
lowercase_ : Tuple = self.movq.decode(__SCREAMING_SNAKE_CASE , force_not_quantize=__SCREAMING_SNAKE_CASE )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase_ : List[str] = image * 0.5 + 0.5
lowercase_ : Union[str, Any] = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[Any] = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
_lowercase : Union[str, Any] = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
_lowercase : int = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Any = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : str = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : List[str] = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
_lowercase : Tuple = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
_lowercase : Tuple = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[int] = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
_lowercase : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
_lowercase : List[str] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : str = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
_lowercase : Dict = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
_lowercase : List[str] = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
_lowercase : str = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
_lowercase : List[Any] = ""
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
assert ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
lowercase_ : Optional[int] = ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
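# The from_readme() tests below mirror the from_string() tests, but go through a README.md file written to a temporary directory.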
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : str = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : int = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int=0 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = []
for old_item in old_list:
lowercase_ : Optional[Any] = old_item.replace('''in_layers.0''' , '''norm1''' )
lowercase_ : Dict = new_item.replace('''in_layers.2''' , '''conv1''' )
lowercase_ : int = new_item.replace('''out_layers.0''' , '''norm2''' )
lowercase_ : Union[str, Any] = new_item.replace('''out_layers.3''' , '''conv2''' )
lowercase_ : List[Any] = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
lowercase_ : int = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
lowercase_ : Union[str, Any] = shave_segments(__SCREAMING_SNAKE_CASE , n_shave_prefix_segments=__SCREAMING_SNAKE_CASE )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
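# Rename the attention layer keys (norm, proj_out) to the diffusers attention naming.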
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any]=0 ):
"""simple docstring"""
lowercase_ : List[Any] = []
for old_item in old_list:
lowercase_ : Union[str, Any] = old_item
lowercase_ : Optional[Any] = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
lowercase_ : List[str] = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
lowercase_ : Tuple = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
lowercase_ : List[Any] = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
lowercase_ : Tuple = shave_segments(__SCREAMING_SNAKE_CASE , n_shave_prefix_segments=__SCREAMING_SNAKE_CASE )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
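# Copy tensors into the converted checkpoint, splitting fused qkv weights into separate query/key/value entries and applying the collected path renames.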
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=None ):
"""simple docstring"""
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowercase_ : Tuple = old_checkpoint[path]
lowercase_ : str = old_tensor.shape[0] // 3
lowercase_ : List[str] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowercase_ : List[Any] = old_tensor.shape[0] // config['''num_head_channels'''] // 3
lowercase_ : int = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowercase_ , lowercase_ , lowercase_ : Optional[int] = old_tensor.split(channels // num_heads , dim=1 )
lowercase_ : Tuple = query.reshape(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = key.reshape(__SCREAMING_SNAKE_CASE )
lowercase_ : int = value.reshape(__SCREAMING_SNAKE_CASE )
for path in paths:
lowercase_ : List[str] = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowercase_ : str = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
lowercase_ : List[Any] = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
lowercase_ : Dict = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
lowercase_ : Dict = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowercase_ : Optional[Any] = old_checkpoint[path['''old''']][:, :, 0]
else:
lowercase_ : Optional[Any] = old_checkpoint[path['''old''']]
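# Walk the original UNet state dict block by block (input, middle, output) and build the diffusers-style checkpoint.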
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Optional[int] = checkpoint['''time_embed.0.weight''']
lowercase_ : Optional[Any] = checkpoint['''time_embed.0.bias''']
lowercase_ : Optional[Any] = checkpoint['''time_embed.2.weight''']
lowercase_ : int = checkpoint['''time_embed.2.bias''']
lowercase_ : Optional[Any] = checkpoint['''input_blocks.0.0.weight''']
lowercase_ : List[str] = checkpoint['''input_blocks.0.0.bias''']
lowercase_ : int = checkpoint['''out.0.weight''']
lowercase_ : Tuple = checkpoint['''out.0.bias''']
lowercase_ : Dict = checkpoint['''out.2.weight''']
lowercase_ : Any = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
lowercase_ : int = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
lowercase_ : Union[str, Any] = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the middle blocks only
lowercase_ : int = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
lowercase_ : str = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the output blocks only
lowercase_ : List[Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
lowercase_ : str = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__SCREAMING_SNAKE_CASE )
}
for i in range(1 , __SCREAMING_SNAKE_CASE ):
lowercase_ : Any = (i - 1) // (config['''num_res_blocks'''] + 1)
lowercase_ : Any = (i - 1) % (config['''num_res_blocks'''] + 1)
lowercase_ : List[str] = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
lowercase_ : List[Any] = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowercase_ : List[Any] = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
lowercase_ : List[str] = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
lowercase_ : Union[str, Any] = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = {'''old''': F'''input_blocks.{i}.0''', '''new''': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
lowercase_ : List[Any] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path, resnet_op] , config=__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = renew_attention_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = {
'''old''': F'''input_blocks.{i}.1''',
'''new''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase_ : Dict = {
F'''input_blocks.{i}.1.qkv.bias''': {
'''key''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'''query''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'''value''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
'''key''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'''query''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'''value''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , )
lowercase_ : int = middle_blocks[0]
lowercase_ : Union[str, Any] = middle_blocks[1]
lowercase_ : Optional[Any] = middle_blocks[2]
lowercase_ : Dict = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = renew_attention_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , attention_paths_to_split=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
for i in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : List[Any] = i // (config['''num_res_blocks'''] + 1)
lowercase_ : int = i % (config['''num_res_blocks'''] + 1)
lowercase_ : Optional[Any] = [shave_segments(__SCREAMING_SNAKE_CASE , 2 ) for name in output_blocks[i]]
lowercase_ : Any = {}
for layer in output_block_layers:
lowercase_ , lowercase_ : Any = layer.split('''.''' )[0], shave_segments(__SCREAMING_SNAKE_CASE , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Optional[Any] = [layer_name]
if len(__SCREAMING_SNAKE_CASE ) > 1:
lowercase_ : List[Any] = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
lowercase_ : Union[str, Any] = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
lowercase_ : Optional[Any] = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = renew_resnet_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = {'''old''': F'''output_blocks.{i}.0''', '''new''': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase_ : Any = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
lowercase_ : str = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
lowercase_ : Union[str, Any] = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__SCREAMING_SNAKE_CASE ) == 2:
lowercase_ : Tuple = []
if len(__SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = renew_attention_paths(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = {
'''old''': F'''output_blocks.{i}.1''',
'''new''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase_ : List[str] = {
F'''output_blocks.{i}.1.qkv.bias''': {
'''key''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'''query''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'''value''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
'''key''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'''query''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'''value''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=__SCREAMING_SNAKE_CASE , )
else:
lowercase_ : int = renew_resnet_paths(__SCREAMING_SNAKE_CASE , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase_ : Optional[int] = '''.'''.join(['''output_blocks''', str(__SCREAMING_SNAKE_CASE ), path['''old''']] )
lowercase_ : Union[str, Any] = '''.'''.join(['''up_blocks''', str(__SCREAMING_SNAKE_CASE ), '''resnets''', str(__SCREAMING_SNAKE_CASE ), path['''new''']] )
lowercase_ : Optional[Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
_lowercase : int = parser.parse_args()
_lowercase : Dict = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowercase : str = json.loads(f.read())
_lowercase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowercase : Any = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowercase : str = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
_lowercase : List[str] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
_lowercase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 93 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase__ ( lowerCamelCase_ ):
def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = eval_examples
lowercase_ : Tuple = post_process_function
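# Evaluation entry point: run the (optionally generative) evaluation loop, then post-process predictions before computing metrics.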
def _snake_case ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "eval" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = gen_kwargs.copy()
lowercase_ : List[str] = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase_ : str = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase_ : Dict = gen_kwargs
lowercase_ : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase_ : List[str] = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase_ : Union[str, Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : Tuple = time.time()
lowercase_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : str = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Any = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase_ : Optional[Any] = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : List[Any] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
else:
lowercase_ : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
return metrics
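# Prediction entry point: same flow as evaluation, but over a test dataloader; returns a PredictionOutput with metrics attached.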
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = gen_kwargs.copy()
lowercase_ : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase_ : Optional[Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = time.time()
lowercase_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : Tuple = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Tuple = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase_ : Any = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''' )
lowercase_ : str = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : Optional[int] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_lowercase : Any = (7_2_0, 1_2_8_0) # Height, Width
_lowercase : List[Any] = (0.4, 0.6) # Drop a box if its height or width falls below this scale.
_lowercase : str = 1 / 1_0_0
_lowercase : Any = ""
_lowercase : Union[str, Any] = ""
_lowercase : Optional[int] = ""
_lowercase : List[Any] = 2_5_0
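# Main loop: repeatedly sample four annotated images, stitch them into one mosaic, and write the image plus its YOLO-format label file.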
def snake_case_ ( ):
"""simple docstring"""
lowercase_ , lowercase_ : Any = get_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for index in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : str = random.sample(range(len(__SCREAMING_SNAKE_CASE ) ) , 4 )
lowercase_ , lowercase_ , lowercase_ : Any = update_image_and_anno(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , filter_scale=__SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase_ : int = random_chars(32 )
lowercase_ : str = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
lowercase_ : int = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , __SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowercase_ : List[Any] = []
for anno in new_annos:
lowercase_ : List[Any] = anno[3] - anno[1]
lowercase_ : List[str] = anno[4] - anno[2]
lowercase_ : Dict = anno[1] + width / 2
lowercase_ : Dict = anno[2] + height / 2
lowercase_ : int = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(__SCREAMING_SNAKE_CASE )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
lowercase_ : Optional[Any] = []
for label_file in glob.glob(os.path.join(__SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
lowercase_ : int = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(__SCREAMING_SNAKE_CASE ) as in_file:
lowercase_ : List[str] = in_file.readlines()
lowercase_ : Optional[Any] = os.path.join(__SCREAMING_SNAKE_CASE , F'''{label_name}.jpg''' )
lowercase_ : Optional[int] = []
for obj_list in obj_lists:
lowercase_ : List[str] = obj_list.rstrip('''\n''' ).split(''' ''' )
lowercase_ : Optional[int] = float(obj[1] ) - float(obj[3] ) / 2
lowercase_ : Any = float(obj[2] ) - float(obj[4] ) / 2
lowercase_ : str = float(obj[1] ) + float(obj[3] ) / 2
lowercase_ : List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__SCREAMING_SNAKE_CASE )
labels.append(__SCREAMING_SNAKE_CASE )
return img_paths, labels
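# Combine four source images into a single output canvas and rescale their bounding boxes into the new coordinates, optionally dropping boxes that become too small.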
def snake_case_ ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : tuple[int, int] , __SCREAMING_SNAKE_CASE : tuple[float, float] , __SCREAMING_SNAKE_CASE : float = 0.0 , ):
"""simple docstring"""
lowercase_ : List[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowercase_ : Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : Optional[int] = int(scale_x * output_size[1] )
lowercase_ : Dict = int(scale_y * output_size[0] )
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = []
for i, index in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = all_img_list[index]
path_list.append(__SCREAMING_SNAKE_CASE )
lowercase_ : int = all_annos[index]
lowercase_ : Dict = cva.imread(__SCREAMING_SNAKE_CASE )
if i == 0: # top-left
lowercase_ : Optional[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
lowercase_ : Tuple = img
for bbox in img_annos:
lowercase_ : Optional[int] = bbox[1] * scale_x
lowercase_ : Optional[Any] = bbox[2] * scale_y
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Tuple = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowercase_ : Dict = cva.resize(__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
lowercase_ : Dict = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Dict = bbox[2] * scale_y
lowercase_ : Optional[int] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowercase_ : List[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : Any = bbox[1] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowercase_ : int = cva.resize(
__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Any = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
lowercase_ : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
lowercase_ : Any = ascii_lowercase + digits
return "".join(random.choice(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 93 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase : List[str] = ["text", "image", "audio"]
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
inputs.append(create_inputs(__SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def snake_case_ ( __SCREAMING_SNAKE_CASE : List ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
for output in outputs:
if isinstance(__SCREAMING_SNAKE_CASE , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(__SCREAMING_SNAKE_CASE , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(__SCREAMING_SNAKE_CASE , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
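# Shared checks applied to every tool: declared input/output types, call results, description format, and agent-type wrapping of inputs and outputs.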
@is_tool_test
class lowerCAmelCase__ :
def _snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , __SCREAMING_SNAKE_CASE ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = create_inputs(self.tool.inputs )
lowercase_ : Tuple = self.tool(*__SCREAMING_SNAKE_CASE )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Any = [outputs]
self.assertListEqual(output_types(__SCREAMING_SNAKE_CASE ) , self.tool.outputs )
def _snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = create_inputs(self.tool.inputs )
lowercase_ : int = self.tool(*__SCREAMING_SNAKE_CASE )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(self.tool.outputs ) )
for output, output_type in zip(__SCREAMING_SNAKE_CASE , self.tool.outputs ):
lowercase_ : Optional[int] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : int = []
for _input, input_type in zip(__SCREAMING_SNAKE_CASE , self.tool.inputs ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Optional[Any] = self.tool(*__SCREAMING_SNAKE_CASE )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(self.tool.outputs ) )
| 93 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Tuple = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
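# Torch-dependent model classes are only added to the import structure when PyTorch is available.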
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
_lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
lowerCAmelCase_ = 42
# setable values
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
@dataclass
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = 42
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase_ = 42
@property
def _snake_case ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , __SCREAMING_SNAKE_CASE = 10_00 , __SCREAMING_SNAKE_CASE = 0.0_001 , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = "linear" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "fixed_small" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "epsilon" , __SCREAMING_SNAKE_CASE = jnp.floataa , ):
"""simple docstring"""
lowercase_ : Dict = dtype
def _snake_case ( self , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if common is None:
lowercase_ : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase_ : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
lowercase_ : List[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
return sample
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = () ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase_ : int = (jnp.arange(0 , __SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
lowercase_ : List[Any] = state.common.alphas_cumprod[t]
lowercase_ : str = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase_ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase_ : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase_ : int = jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase_ : List[str] = jnp.log(jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase_ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase_ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase_ : Optional[Any] = variance
lowercase_ : Union[str, Any] = state.common.betas[t]
lowercase_ : Union[str, Any] = (predicted_variance + 1) / 2
lowercase_ : Any = frac * max_log + (1 - frac) * min_log
return variance
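# One reverse diffusion step x_t -> x_{t-1}: predict x_0, form the posterior mean, and add sampled variance for t > 0.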
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , ):
"""simple docstring"""
lowercase_ : Optional[int] = timestep
if key is None:
lowercase_ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase_ , lowercase_ : Optional[Any] = jnp.split(__SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase_ : int = None
# 1. compute alphas, betas
lowercase_ : Any = state.common.alphas_cumprod[t]
lowercase_ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase_ : int = 1 - alpha_prod_t
lowercase_ : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase_ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase_ : Any = model_output
elif self.config.prediction_type == "v_prediction":
lowercase_ : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
'''or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase_ : Optional[Any] = jnp.clip(__SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase_ : Optional[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase_ : str = jax.random.split(__SCREAMING_SNAKE_CASE , num=1 )
lowercase_ : List[Any] = jax.random.normal(__SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , predicted_variance=__SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase_ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase_ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return add_noise_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return get_velocity_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 93 | 1 |
'''simple docstring'''
import os
import string
import sys
_lowercase : str = 1 << 8
_lowercase : Tuple = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 2_7,
"up": 6_5 + ARROW_KEY_FLAG,
"down": 6_6 + ARROW_KEY_FLAG,
"right": 6_7 + ARROW_KEY_FLAG,
"left": 6_8 + ARROW_KEY_FLAG,
"mod_int": 9_1,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 5_0,
"delete": 5_1,
"pg_up": 5_3,
"pg_down": 5_4,
}
_lowercase : List[str] = KEYMAP["up"]
_lowercase : Any = KEYMAP["left"]
if sys.platform == "win32":
_lowercase : str = []
_lowercase : List[str] = {
B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(1_0):
_lowercase : Any = ord(str(i))
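# Read a single keypress without waiting for Enter, via msvcrt on Windows and termios/tty on POSIX.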
def snake_case_ ( ):
"""simple docstring"""
if os.name == "nt":
import msvcrt
lowercase_ : Dict = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__SCREAMING_SNAKE_CASE ) == 0:
# Read the keystroke
lowercase_ : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowercase_ : Any = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowercase_ : Tuple = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
WIN_CH_BUFFER.append(__SCREAMING_SNAKE_CASE )
if ord(__SCREAMING_SNAKE_CASE ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowercase_ : Dict = chr(KEYMAP['''esc'''] )
except KeyError:
lowercase_ : Dict = cha[1]
else:
lowercase_ : List[Any] = ch.decode(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Any = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowercase_ : Dict = sys.stdin.fileno()
lowercase_ : List[Any] = termios.tcgetattr(__SCREAMING_SNAKE_CASE )
try:
tty.setraw(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = sys.stdin.read(1 )
finally:
termios.tcsetattr(__SCREAMING_SNAKE_CASE , termios.TCSADRAIN , __SCREAMING_SNAKE_CASE )
return ch
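# Decode raw escape sequences into printable characters, arrow-key codes, or the "undefined" key.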
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Optional[int] = get_raw_chars()
if ord(__SCREAMING_SNAKE_CASE ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__SCREAMING_SNAKE_CASE ) == KEYMAP["esc"]:
lowercase_ : str = get_raw_chars()
if ord(__SCREAMING_SNAKE_CASE ) == KEYMAP["mod_int"]:
lowercase_ : Dict = get_raw_chars()
if ord(__SCREAMING_SNAKE_CASE ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__SCREAMING_SNAKE_CASE ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__SCREAMING_SNAKE_CASE ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 93 |
'''simple docstring'''
_lowercase : int = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = 0
while number:
        # Speed increased slightly by checking 5 digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# Every chain eventually enters one of two loops:
# one ends at 89 (58 is declared first because seeding it requires the fewest
# iterations for all remaining members to be checked), and the other ends at 1
# and contains only the single element 1.
# So 58 and 1 are the two values pre-declared below.
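# Worked example: 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 (loops at 89),
# while 44 -> 32 -> 13 -> 10 -> 1 stays at 1 forever.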
# Changed dictionary to an array to quicken the solution
_lowercase : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True
CHAINS[57] = False
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase_ : Tuple = chain(next_number(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = number_chain
while number < 10000000:
lowercase_ : int = number_chain
number *= 10
return number_chain
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 10000000 ):
"""simple docstring"""
for i in range(1 , __SCREAMING_SNAKE_CASE ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 93 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : str = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 1 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(__SCREAMING_SNAKE_CASE , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : str = _distribute_shards(**__SCREAMING_SNAKE_CASE )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Dict = _split_gen_kwargs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
if expected is RuntimeError:
with pytest.raises(__SCREAMING_SNAKE_CASE ):
_number_of_shards_in_gen_kwargs(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : int = _number_of_shards_in_gen_kwargs(__SCREAMING_SNAKE_CASE )
assert out == expected
| 93 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : List[str] = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
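# Recursion example: for 11, divmod gives (5, 1), (2, 1), (1, 0) on successive calls,
# so the string builds up as "1" + "0" + "1" + "1" = "1011" and the wrapper below
# returns "0b1011".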
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : str = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError('''No input value was provided''' )
lowercase_ : Optional[int] = '''-''' if number.startswith('''-''' ) else ''''''
lowercase_ : Union[str, Any] = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return F'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 | 1 |
'''simple docstring'''
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
lowercase_ : Tuple = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-0_8 )
lowercase_ : Optional[Any] = torch.nn.Softmax(dim=1 )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = W_supports['''sizes'''].tolist()
lowercase_ : Dict = W_supports['''start_token_id'''].item()
lowercase_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase_ : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : str = None
lowercase_ : Dict = None
lowercase_ : Tuple = W_supports['''input_ids'''] == start_token_id
lowercase_ : Any = W_supports['''input_ids'''] == end_token_id
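        # For each support set: slice out its encodings, keep the embeddings at its
        # start/end marker tokens, and score them against the query encodings
        # (matmul + softmax) to obtain per-token start/end probabilities.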
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
lowercase_ : List[str] = 0
else:
lowercase_ : List[Any] = support_sizes[i - 1]
lowercase_ : str = S[s : s + size][start_token_masks[s : s + size]]
lowercase_ : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
lowercase_ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase_ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase_ : Tuple = torch.vstack((p_starts, p_start) )
lowercase_ : Optional[Any] = torch.vstack((p_ends, p_end) )
else:
lowercase_ : str = p_start
lowercase_ : int = p_end
return p_starts, p_ends
| 93 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_lowercase : Any = (7_2_0, 1_2_8_0) # Height, Width
_lowercase : List[Any] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
_lowercase : str = 1 / 1_0_0
_lowercase : Any = ""
_lowercase : Union[str, Any] = ""
_lowercase : Optional[int] = ""
_lowercase : List[Any] = 2_5_0
def snake_case_ ( ):
"""simple docstring"""
lowercase_ , lowercase_ : Any = get_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for index in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : str = random.sample(range(len(__SCREAMING_SNAKE_CASE ) ) , 4 )
lowercase_ , lowercase_ , lowercase_ : Any = update_image_and_anno(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , filter_scale=__SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase_ : int = random_chars(32 )
lowercase_ : str = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
lowercase_ : int = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , __SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
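    # Convert the corner-format boxes (class, xmin, ymin, xmax, ymax) back into
    # YOLO-style lines "class x_center y_center width height" in relative coordinates.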
lowercase_ : List[Any] = []
for anno in new_annos:
lowercase_ : List[Any] = anno[3] - anno[1]
lowercase_ : List[str] = anno[4] - anno[2]
lowercase_ : Dict = anno[1] + width / 2
lowercase_ : Dict = anno[2] + height / 2
lowercase_ : int = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(__SCREAMING_SNAKE_CASE )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
lowercase_ : Optional[Any] = []
for label_file in glob.glob(os.path.join(__SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
lowercase_ : int = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(__SCREAMING_SNAKE_CASE ) as in_file:
lowercase_ : List[str] = in_file.readlines()
lowercase_ : Optional[Any] = os.path.join(__SCREAMING_SNAKE_CASE , F'''{label_name}.jpg''' )
lowercase_ : Optional[int] = []
for obj_list in obj_lists:
lowercase_ : List[str] = obj_list.rstrip('''\n''' ).split(''' ''' )
lowercase_ : Optional[int] = float(obj[1] ) - float(obj[3] ) / 2
lowercase_ : Any = float(obj[2] ) - float(obj[4] ) / 2
lowercase_ : str = float(obj[1] ) + float(obj[3] ) / 2
lowercase_ : List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__SCREAMING_SNAKE_CASE )
labels.append(__SCREAMING_SNAKE_CASE )
return img_paths, labels
def snake_case_ ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : tuple[int, int] , __SCREAMING_SNAKE_CASE : tuple[float, float] , __SCREAMING_SNAKE_CASE : float = 0.0 , ):
"""simple docstring"""
lowercase_ : List[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowercase_ : Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : Optional[int] = int(scale_x * output_size[1] )
lowercase_ : Dict = int(scale_y * output_size[0] )
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = []
for i, index in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = all_img_list[index]
path_list.append(__SCREAMING_SNAKE_CASE )
lowercase_ : int = all_annos[index]
lowercase_ : Dict = cva.imread(__SCREAMING_SNAKE_CASE )
if i == 0: # top-left
lowercase_ : Optional[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
lowercase_ : Tuple = img
for bbox in img_annos:
lowercase_ : Optional[int] = bbox[1] * scale_x
lowercase_ : Optional[Any] = bbox[2] * scale_y
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Tuple = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowercase_ : Dict = cva.resize(__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
lowercase_ : Dict = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Dict = bbox[2] * scale_y
lowercase_ : Optional[int] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowercase_ : List[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : Any = bbox[1] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowercase_ : int = cva.resize(
__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Any = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowercase_ : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
lowercase_ : Any = ascii_lowercase + digits
return "".join(random.choice(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 93 | 1 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_lowercase : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
lowerCAmelCase_ = None
lowerCAmelCase_ = "utf-8"
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = True # deprecated
lowerCAmelCase_ = None # deprecated
lowerCAmelCase_ = 1_0 << 2_0 # 10MB
lowerCAmelCase_ = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
lowerCAmelCase_ = JsonConfig
def _snake_case ( self ):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
lowercase_ : List[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowercase_ : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ):
lowercase_ : Union[str, Any] = data_files
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Any = [files]
lowercase_ : Union[str, Any] = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowercase_ : List[Any] = []
for split_name, files in data_files.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = [files]
lowercase_ : Tuple = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) )
return splits
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowercase_ : Tuple = self.config.features.arrow_schema.field(__SCREAMING_SNAKE_CASE ).type
lowercase_ : Optional[Any] = pa_table.append_column(__SCREAMING_SNAKE_CASE , pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=__SCREAMING_SNAKE_CASE ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase_ : Optional[Any] = table_cast(__SCREAMING_SNAKE_CASE , self.config.features.arrow_schema )
return pa_table
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__SCREAMING_SNAKE_CASE , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowercase_ : Any = json.load(__SCREAMING_SNAKE_CASE )
# We keep only the field we are interested in
lowercase_ : Optional[int] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
lowercase_ : Tuple = set().union(*[row.keys() for row in dataset] )
lowercase_ : Optional[int] = {col: [row.get(__SCREAMING_SNAKE_CASE ) for row in dataset] for col in keys}
else:
lowercase_ : Dict = dataset
lowercase_ : Dict = pa.Table.from_pydict(__SCREAMING_SNAKE_CASE )
yield file_idx, self._cast_table(__SCREAMING_SNAKE_CASE )
# If the file has one json object per line
else:
with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as f:
lowercase_ : Union[str, Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowercase_ : Tuple = max(self.config.chunksize // 32 , 16 << 10 )
lowercase_ : Optional[Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
lowercase_ : List[str] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__SCREAMING_SNAKE_CASE )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowercase_ : int = batch.decode(self.config.encoding , errors=__SCREAMING_SNAKE_CASE ).encode('''utf-8''' )
try:
while True:
try:
lowercase_ : Union[str, Any] = paj.read_json(
io.BytesIO(__SCREAMING_SNAKE_CASE ) , read_options=paj.ReadOptions(block_size=__SCREAMING_SNAKE_CASE ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__SCREAMING_SNAKE_CASE , pa.ArrowInvalid )
and "straddling" not in str(__SCREAMING_SNAKE_CASE )
or block_size > len(__SCREAMING_SNAKE_CASE )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'''Batch of {len(__SCREAMING_SNAKE_CASE )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__SCREAMING_SNAKE_CASE , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowercase_ : Tuple = json.load(__SCREAMING_SNAKE_CASE )
except json.JSONDecodeError:
logger.error(F'''Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # list is the only sequence type supported in JSON
try:
lowercase_ : List[Any] = set().union(*[row.keys() for row in dataset] )
lowercase_ : Dict = {col: [row.get(__SCREAMING_SNAKE_CASE ) for row in dataset] for col in keys}
lowercase_ : str = pa.Table.from_pydict(__SCREAMING_SNAKE_CASE )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}''' )
raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(__SCREAMING_SNAKE_CASE )
break
else:
logger.error(F'''Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}''' )
raise ValueError(
F'''Not able to read records in the JSON file at {file}. '''
F'''You should probably indicate the field of the JSON file containing your records. '''
F'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__SCREAMING_SNAKE_CASE )
batch_idx += 1
| 93 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class lowerCAmelCase__ :
def __init__( self ):
"""simple docstring"""
lowercase_ : int = {}
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = {}
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__SCREAMING_SNAKE_CASE )
if nodea not in self.connections:
self.add_node(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = probability
def _snake_case ( self ):
"""simple docstring"""
return list(self.connections )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = 0
lowercase_ : Tuple = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : list[tuple[str, str, float]] , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : List[Any] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = Counter(graph.get_nodes() )
lowercase_ : Any = start
for _ in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : int = graph.transition(__SCREAMING_SNAKE_CASE )
visited[node] += 1
return visited
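# Illustrative usage (hypothetical transition table):
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# Calling the helper above with ("a", transitions, 1000) returns a Counter of how often
# each node was visited during the random walk.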
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
_lowercase : Optional[Any] = None
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : Optional[int] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_lowercase : int = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
_lowercase : int = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
_lowercase : Optional[int] = "▁"
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = BarthezTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[str] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : int = vocab_file
lowercase_ : Optional[Any] = False if not self.vocab_file else True
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : Any = [self.cls_token_id]
lowercase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : str = [self.sep_token_id]
lowercase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Any = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 93 |
'''simple docstring'''
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
lowercase_ : Tuple = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-0_8 )
lowercase_ : Optional[Any] = torch.nn.Softmax(dim=1 )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = W_supports['''sizes'''].tolist()
lowercase_ : Dict = W_supports['''start_token_id'''].item()
lowercase_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase_ : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : str = None
lowercase_ : Dict = None
lowercase_ : Tuple = W_supports['''input_ids'''] == start_token_id
lowercase_ : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
lowercase_ : List[str] = 0
else:
lowercase_ : List[Any] = support_sizes[i - 1]
lowercase_ : str = S[s : s + size][start_token_masks[s : s + size]]
lowercase_ : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
lowercase_ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase_ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase_ : Tuple = torch.vstack((p_starts, p_start) )
lowercase_ : Optional[Any] = torch.vstack((p_ends, p_end) )
else:
lowercase_ : str = p_start
lowercase_ : int = p_end
return p_starts, p_ends
| 93 | 1 |
'''simple docstring'''
_lowercase : Optional[int] = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 93 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : List[Any] = "▁"
_lowercase : Tuple = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowercase : List[str] = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
_lowercase : List[str] = {
"facebook/m2m100_418M": 1_0_2_4,
}
# fmt: off
_lowercase : Tuple = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="m2m100" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=8 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ : List[Any] = language_codes
lowercase_ : Optional[int] = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowercase_ : List[Any] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
lowercase_ : Union[str, Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__SCREAMING_SNAKE_CASE )
for lang_code in fairseq_language_code
if self.get_lang_token(__SCREAMING_SNAKE_CASE ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , language_codes=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : int = vocab_file
lowercase_ : Any = load_json(__SCREAMING_SNAKE_CASE )
lowercase_ : str = {v: k for k, v in self.encoder.items()}
lowercase_ : Optional[int] = spm_file
lowercase_ : Any = load_spm(__SCREAMING_SNAKE_CASE , self.sp_model_kwargs )
lowercase_ : List[Any] = len(self.encoder )
lowercase_ : Dict = {
self.get_lang_token(__SCREAMING_SNAKE_CASE ): self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : Optional[int] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_token_to_id.items()}
lowercase_ : Tuple = src_lang if src_lang is not None else '''en'''
lowercase_ : Optional[int] = tgt_lang
lowercase_ : Any = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowercase_ : Dict = num_madeup_words
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder[self.unk_token] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = []
lowercase_ : List[str] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowercase_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = [1] * len(self.prefix_tokens )
lowercase_ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.__dict__.copy()
lowercase_ : List[Any] = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : List[Any] = {}
lowercase_ : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Tuple = Path(__SCREAMING_SNAKE_CASE )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
lowercase_ : Dict = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowercase_ : Dict = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __SCREAMING_SNAKE_CASE )
if os.path.abspath(self.spm_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.spm_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : int = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (str(__SCREAMING_SNAKE_CASE ), str(__SCREAMING_SNAKE_CASE ))
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = src_lang
lowercase_ : List[str] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Tuple = src_lang
lowercase_ : Any = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.get_lang_id(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.lang_token_to_id[lang_token]
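        # M2M100 prefixes every sequence with the language token and appends "</s>".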
lowercase_ : Optional[Any] = [self.cur_lang_id]
lowercase_ : Union[str, Any] = [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = self.lang_token_to_id[lang_token]
lowercase_ : str = [self.cur_lang_id]
lowercase_ : List[str] = [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.lang_code_to_token[lang]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.get_lang_token(__SCREAMING_SNAKE_CASE )
return self.lang_token_to_id[lang_token]
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict[str, Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = sentencepiece.SentencePieceProcessor(**__SCREAMING_SNAKE_CASE )
spm.Load(str(__SCREAMING_SNAKE_CASE ) )
return spm
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
return json.load(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , indent=2 )
| 93 | 1 |
'''simple docstring'''
from math import isqrt, loga
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Any = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 , __SCREAMING_SNAKE_CASE , i ):
lowercase_ : Optional[Any] = False
return [i for i in range(2 , __SCREAMING_SNAKE_CASE ) if is_prime[i]]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 800800 , __SCREAMING_SNAKE_CASE : int = 800800 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = degree * loga(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = int(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = calculate_prime_numbers(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = 0
lowercase_ : List[Any] = 0
lowercase_ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) - 1
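    # A pair of primes (p, q) is counted when p**q * q**p <= base**degree, i.e. when
    # q*log2(p) + p*log2(q) <= degree*log2(base) == upper_bound, which is exactly the
    # comparison used in the inner loop below.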
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = "▁"
_lowercase : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
_lowercase : Optional[int] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
_lowercase : str = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
_lowercase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
lowercase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
lowercase_ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase_ : Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : str = 1
lowercase_ : str = len(self.sp_model )
lowercase_ : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ : Optional[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase_ : Optional[Any] = src_lang if src_lang is not None else '''en_XX'''
lowercase_ : str = self.lang_code_to_id[self._src_lang]
lowercase_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.__dict__.copy()
lowercase_ : Dict = None
lowercase_ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : Dict = {}
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = [1] * len(self.prefix_tokens )
lowercase_ : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Optional[Any] = src_lang
lowercase_ : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : Any = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Tuple = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[str] = src_lang
lowercase_ : int = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = self.lang_code_to_id[src_lang]
lowercase_ : Optional[Any] = []
lowercase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.lang_code_to_id[lang]
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
| 93 | 1 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_lowercase : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
inspect_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Dict = path + '''.py'''
assert script_name in os.listdir(__SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(__SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
inspect_metric(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = path + '''.py'''
assert script_name in os.listdir(__SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(__SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = get_dataset_config_info(__SCREAMING_SNAKE_CASE , config_name=__SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE ):
get_dataset_config_info(__SCREAMING_SNAKE_CASE , config_name=__SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
lowercase_ : Dict = get_dataset_config_names(__SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : Union[str, Any] = get_dataset_infos(__SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
lowercase_ : List[Any] = expected_configs[0]
assert expected_config in infos
lowercase_ : Tuple = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
lowercase_ : Dict = get_dataset_infos(__SCREAMING_SNAKE_CASE )
assert expected_config in infos
lowercase_ : List[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE ):
get_dataset_split_names(__SCREAMING_SNAKE_CASE , config_name=__SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCAmelCase__ :
lowerCAmelCase_ = None
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : Any = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , value )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : str = os.path.join(__SCREAMING_SNAKE_CASE , '''feat_extract.json''' )
feat_extract_first.to_json_file(__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.feature_extraction_class.from_json_file(__SCREAMING_SNAKE_CASE )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Union[str, Any] = feat_extract_first.save_pretrained(__SCREAMING_SNAKE_CASE )[0]
check_json_file_has_correct_format(__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.feature_extraction_class.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.feature_extraction_class()
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
@property
def _snake_case ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = ort.SessionOptions()
lowercase_ : List[Any] = False
return options
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowercase_ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowercase_ : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = '''A red cat sitting on a park bench'''
lowercase_ : Optional[Any] = np.random.RandomState(0 )
lowercase_ : str = pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
lowercase_ : int = output.images
lowercase_ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
lowercase_ : int = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowercase_ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowercase_ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
lowercase_ : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = '''A red cat sitting on a park bench'''
lowercase_ : Dict = np.random.RandomState(0 )
lowercase_ : int = pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
lowercase_ : Optional[Any] = output.images
lowercase_ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
lowercase_ : List[str] = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 93 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct_text_model'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=5_02_44 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=20_48 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_28 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = vocab_size
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[Any] = d_kv
lowercase_ : List[str] = d_ff
lowercase_ : List[str] = num_layers
lowercase_ : Optional[Any] = num_heads
lowercase_ : Union[str, Any] = relative_attention_num_buckets
lowercase_ : Optional[int] = relative_attention_max_distance
lowercase_ : Union[str, Any] = dropout_rate
lowercase_ : Dict = layer_norm_epsilon
lowercase_ : Dict = initializer_factor
lowercase_ : List[Any] = use_cache
lowercase_ : Optional[int] = eos_token_id
lowercase_ : Optional[int] = decoder_start_token_id
# for backwards compatibility
lowercase_ : Any = dense_act_fn
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Optional[int] = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase_ : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct_vision_model'''
def __init__( self , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=20_48 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=1E-1_0 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=40_96 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_28 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Any = patch_embed_hidden_size
lowercase_ : List[Any] = d_ff
lowercase_ : Dict = dropout_rate
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : int = initializer_range
lowercase_ : Dict = initializer_factor
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : str = dense_act_fn
lowercase_ : Dict = seq_len
lowercase_ : List[Any] = relative_attention_num_buckets
lowercase_ : int = relative_attention_max_distance
lowercase_ : Optional[int] = d_kv
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : str = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase_ : Optional[int] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct'''
lowerCAmelCase_ = True
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text_config is None:
lowercase_ : Optional[Any] = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
lowercase_ : Dict = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
lowercase_ : str = PixaStructTextConfig(**__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = PixaStructVisionConfig(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.text_config.decoder_start_token_id
lowercase_ : Union[str, Any] = self.text_config.pad_token_id
lowercase_ : Union[str, Any] = self.text_config.eos_token_id
lowercase_ : int = initializer_factor
lowercase_ : Any = initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : int = is_vqa
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = copy.deepcopy(self.__dict__ )
lowercase_ : Any = self.text_config.to_dict()
lowercase_ : Optional[Any] = self.vision_config.to_dict()
lowercase_ : Optional[int] = self.__class__.model_type
return output
| 93 | 1 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowercase_ : Optional[Any] = VideoClassificationPipeline(model=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE , top_k=2 )
lowercase_ : Optional[Any] = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for example in examples:
lowercase_ : Tuple = video_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
@require_torch
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowercase_ : str = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
lowercase_ : List[Any] = pipeline(
'''video-classification''' , model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , frame_sampling_rate=4 )
lowercase_ : List[Any] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowercase_ : Optional[int] = video_classifier(__SCREAMING_SNAKE_CASE , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , )
lowercase_ : Dict = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def _snake_case ( self ):
"""simple docstring"""
pass
| 93 |
'''simple docstring'''
from math import isqrt, loga
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Any = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , max_number , i ):
is_prime[j] = False
return [i for i in range(2 , max_number ) if is_prime[i]]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 800800 , __SCREAMING_SNAKE_CASE : int = 800800 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = degree * loga(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = int(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = calculate_prime_numbers(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = 0
lowercase_ : List[Any] = 0
lowercase_ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93 | 1 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = RoFormerTokenizer
lowerCAmelCase_ = RoFormerTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = '''永和服装饰品有限公司,今天天气非常好'''
lowercase_ : str = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = self.get_tokenizer()
lowercase_ , lowercase_ : Any = self.get_chinese_input_output_texts()
lowercase_ : Any = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , output_text.split() )
lowercase_ : List[str] = tokens + [tokenizer.unk_token]
lowercase_ : int = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = self.get_rust_tokenizer()
lowercase_ , lowercase_ : Optional[Any] = self.get_chinese_input_output_texts()
lowercase_ : Any = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , output_text.split() )
lowercase_ : str = tokens + [tokenizer.unk_token]
lowercase_ : List[Any] = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
pass
| 93 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : int = logging.get_logger(__name__)
_lowercase : List[Any] = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase_ = '''nat'''
lowerCAmelCase_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=[3, 4, 6, 5] , __SCREAMING_SNAKE_CASE=[2, 4, 8, 16] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = patch_size
lowercase_ : List[Any] = num_channels
lowercase_ : str = embed_dim
lowercase_ : List[str] = depths
lowercase_ : str = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = num_heads
lowercase_ : int = kernel_size
lowercase_ : Union[str, Any] = mlp_ratio
lowercase_ : Optional[int] = qkv_bias
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : List[Any] = drop_path_rate
lowercase_ : List[Any] = hidden_act
lowercase_ : int = layer_norm_eps
lowercase_ : int = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase_ : Dict = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
lowercase_ : Tuple = layer_scale_init_value
lowercase_ : Union[str, Any] = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
lowercase_ , lowercase_ : int = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 93 | 1 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_lowercase : List[str] = "<<<<<<< This should probably be modified because it mentions: "
_lowercase : int = "=======\n>>>>>>>\n"
_lowercase : Optional[Any] = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
_lowercase : Optional[Any] = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
def snake_case_ ( __SCREAMING_SNAKE_CASE : Namespace ):
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowerCAmelCase__ ( lowerCamelCase_ ):
@staticmethod
def _snake_case ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = get_logger('''datasets-cli/converting''' )
lowercase_ : List[Any] = tfds_path
lowercase_ : Tuple = datasets_directory
def _snake_case ( self ):
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
lowercase_ : Any = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase_ : Optional[Any] = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
lowercase_ : List[Any] = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
lowercase_ : Optional[Any] = []
lowercase_ : Optional[Any] = []
lowercase_ : Union[str, Any] = {}
if os.path.isdir(self._tfds_path ):
lowercase_ : int = os.listdir(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Any = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
lowercase_ : int = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not os.path.isfile(__SCREAMING_SNAKE_CASE ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f:
lowercase_ : List[str] = f.readlines()
lowercase_ : List[Any] = []
lowercase_ : str = False
lowercase_ : Union[str, Any] = False
lowercase_ : Tuple = []
for line in lines:
lowercase_ : Any = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase_ : Union[str, Any] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
lowercase_ : str = ''''''
continue
elif "from absl import logging" in out_line:
lowercase_ : Any = '''from datasets import logging\n'''
elif "getLogger" in out_line:
lowercase_ : Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase_ : Union[str, Any] = True
lowercase_ : Union[str, Any] = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__SCREAMING_SNAKE_CASE ) + '''\n''' )
out_lines.append(__SCREAMING_SNAKE_CASE )
out_lines.append(__SCREAMING_SNAKE_CASE )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase_ : Optional[int] = re.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase_ : List[str] = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
lowercase_ : Tuple = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase_ : str = True
out_lines.append(__SCREAMING_SNAKE_CASE )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase_ : Tuple = f_name.replace('''.py''' , '''''' )
lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__SCREAMING_SNAKE_CASE )
if needs_manual_update:
with_manual_update.append(__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(__SCREAMING_SNAKE_CASE )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
lowercase_ : Union[str, Any] = os.path.basename(__SCREAMING_SNAKE_CASE )
lowercase_ : int = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 93 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 93 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = [10, 20, 30, 40, 50, 60]
lowercase_ : Optional[Any] = [2, 4, 6, 8, 10, 12]
lowercase_ : Union[str, Any] = 1_00
self.assertEqual(kp.calc_profit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 2_10 )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(
ValueError , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 93 | 1 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
'''simple docstring'''
import argparse
import copy
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
lowercase_ : List[Any] = {}
with open(__SCREAMING_SNAKE_CASE ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase_ : Union[str, Any] = []
_list.append([line.split()[1], line.split()[2]] )
lowercase_ : str = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase_ : Optional[int] = []
_list.append([line.split()[0], line.split()[2]] )
lowercase_ : Dict = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE ) as f:
lowercase_ : List[str] = f.read(1 )
lowercase_ : Optional[int] = start_node
lowercase_ : Any = []
lowercase_ : List[str] = start_node
lowercase_ : Optional[Any] = 0
while visiting not in first_solution:
lowercase_ : Any = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__SCREAMING_SNAKE_CASE ) and k[0] not in first_solution:
lowercase_ : List[Any] = k[1]
lowercase_ : List[Any] = k[0]
first_solution.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = distance_of_first_solution + int(__SCREAMING_SNAKE_CASE )
lowercase_ : int = best_node
first_solution.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase_ : Optional[Any] = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Tuple = []
for n in solution[1:-1]:
lowercase_ : List[str] = solution.index(__SCREAMING_SNAKE_CASE )
for kn in solution[1:-1]:
lowercase_ : Any = solution.index(__SCREAMING_SNAKE_CASE )
if n == kn:
continue
lowercase_ : Dict = copy.deepcopy(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = kn
lowercase_ : List[Any] = n
lowercase_ : str = 0
for k in _tmp[:-1]:
lowercase_ : Tuple = _tmp[_tmp.index(__SCREAMING_SNAKE_CASE ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase_ : Optional[Any] = distance + int(i[1] )
_tmp.append(__SCREAMING_SNAKE_CASE )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase_ : Union[str, Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __SCREAMING_SNAKE_CASE : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = 1
lowercase_ : List[str] = first_solution
lowercase_ : Dict = []
lowercase_ : List[str] = distance_of_first_solution
lowercase_ : Optional[Any] = solution
while count <= iters:
lowercase_ : int = find_neighborhood(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = 0
lowercase_ : Dict = neighborhood[index_of_best_solution]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) - 1
lowercase_ : Tuple = False
while not found:
lowercase_ : Optional[int] = 0
while i < len(__SCREAMING_SNAKE_CASE ):
if best_solution[i] != solution[i]:
lowercase_ : Tuple = best_solution[i]
lowercase_ : Optional[int] = solution[i]
break
lowercase_ : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase_ : Tuple = True
lowercase_ : Optional[int] = best_solution[:-1]
lowercase_ : Optional[Any] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase_ : Optional[Any] = cost
lowercase_ : int = solution
else:
lowercase_ : Any = index_of_best_solution + 1
lowercase_ : Any = neighborhood[index_of_best_solution]
if len(__SCREAMING_SNAKE_CASE ) >= size:
tabu_list.pop(0 )
lowercase_ : List[Any] = count + 1
return best_solution_ever, best_cost
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str]=None ):
"""simple docstring"""
lowercase_ : Any = generate_neighbours(args.File )
lowercase_ , lowercase_ : Union[str, Any] = generate_first_solution(
args.File , __SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Optional[int] = tabu_search(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 93 | 1 |
'''simple docstring'''
_lowercase : Tuple = 9.8_0_6_6_5
def snake_case_ ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float = g ):
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 93 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase_ : int = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
lowercase_ : Union[str, Any] = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
lowercase_ : Union[str, Any] = model(input_ids.to(__SCREAMING_SNAKE_CASE ) , labels=labels.to(__SCREAMING_SNAKE_CASE ) ).loss
lowercase_ : int = -(labels.shape[-1] * loss.item())
lowercase_ : Any = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 93 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : str = logging.get_logger(__name__)
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : int = b.T
lowercase_ : List[str] = np.sum(np.square(__SCREAMING_SNAKE_CASE ) , axis=1 )
lowercase_ : str = np.sum(np.square(__SCREAMING_SNAKE_CASE ) , axis=0 )
lowercase_ : Union[str, Any] = np.matmul(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = aa[:, None] - 2 * ab + ba[None, :]
return d
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : Any = x.reshape(-1 , 3 )
lowercase_ : List[Any] = squared_euclidean_distance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return np.argmin(__SCREAMING_SNAKE_CASE , axis=1 )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = ['''pixel_values''']
def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = size if size is not None else {'''height''': 2_56, '''width''': 2_56}
lowercase_ : Any = get_size_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = np.array(__SCREAMING_SNAKE_CASE ) if clusters is not None else None
lowercase_ : Union[str, Any] = do_resize
lowercase_ : int = size
lowercase_ : Optional[int] = resample
lowercase_ : List[str] = do_normalize
lowercase_ : List[Any] = do_color_quantize
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
lowercase_ : Optional[int] = rescale(image=__SCREAMING_SNAKE_CASE , scale=1 / 127.5 , data_format=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = image - 1
return image
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
lowercase_ : Union[str, Any] = size if size is not None else self.size
lowercase_ : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = resample if resample is not None else self.resample
lowercase_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Optional[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
lowercase_ : Tuple = clusters if clusters is not None else self.clusters
lowercase_ : Dict = np.array(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
lowercase_ : List[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
lowercase_ : str = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
lowercase_ : Optional[Any] = [self.normalize(image=__SCREAMING_SNAKE_CASE ) for image in images]
if do_color_quantize:
lowercase_ : str = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
lowercase_ : Dict = np.array(__SCREAMING_SNAKE_CASE )
lowercase_ : int = color_quantize(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
lowercase_ : Optional[Any] = images.shape[0]
lowercase_ : Optional[Any] = images.reshape(__SCREAMING_SNAKE_CASE , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
lowercase_ : str = list(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Optional[Any] = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
lowercase_ : str = {'''input_ids''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : List[str] = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = []
for i in range(len(__SCREAMING_SNAKE_CASE ) - pat_len + 1 ):
lowercase_ : Tuple = True
for j in range(__SCREAMING_SNAKE_CASE ):
if s[i + j] != pattern[j]:
lowercase_ : List[str] = False
break
if match_found:
position.append(__SCREAMING_SNAKE_CASE )
return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 93 | 1 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def snake_case_ ( ):
"""simple docstring"""
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
lowercase_ : str = [1, 2, 3]
with pytest.raises(__SCREAMING_SNAKE_CASE ):
with parallel_backend('''unsupported backend''' ):
map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=2 )
with pytest.raises(__SCREAMING_SNAKE_CASE ):
with parallel_backend('''unsupported backend''' ):
map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Union[str, Any] = [1, 2]
lowercase_ : Optional[int] = {'''a''': 1, '''b''': 2}
lowercase_ : Optional[int] = {'''a''': [1, 2], '''b''': [3, 4]}
lowercase_ : List[Any] = {'''a''': {'''1''': 1}, '''b''': 2}
lowercase_ : Any = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowercase_ : Union[str, Any] = [2, 3]
lowercase_ : Tuple = {'''a''': 2, '''b''': 3}
lowercase_ : Optional[int] = {'''a''': [2, 3], '''b''': [4, 5]}
lowercase_ : int = {'''a''': {'''1''': 2}, '''b''': 3}
lowercase_ : Optional[Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
| 93 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_lowercase : Optional[Any] = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
_lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Tuple = '''https://pypi.org/pypi/diffusers/json'''
lowercase_ : Tuple = json.loads(request.urlopen(__SCREAMING_SNAKE_CASE ).read() )['''releases'''].keys()
return sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : version.Version(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( ):
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = Path(__SCREAMING_SNAKE_CASE ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] ):
"""simple docstring"""
init_hf_modules()
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : str = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : int = f.read()
# Imports of the form `import .xxx`
    lowercase_ : List[Any] = re.findall(R'''^\s*import\s+\.(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(R'''^\s*from\s+\.(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : int = False
lowercase_ : Any = [module_file]
lowercase_ : Dict = []
# Let's recurse through all relative imports
while not no_change:
lowercase_ : Dict = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = Path(__SCREAMING_SNAKE_CASE ).parent
lowercase_ : Optional[int] = [str(module_path / m ) for m in new_imports]
lowercase_ : str = [f for f in new_import_files if f not in all_relative_imports]
lowercase_ : int = [F'''{f}.py''' for f in new_import_files]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) == 0
all_relative_imports.extend(__SCREAMING_SNAKE_CASE )
return all_relative_imports
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : Union[str, Any] = f.read()
# Imports of the form `import xxx`
    lowercase_ : Any = re.findall(R'''^\s*import\s+(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(R'''^\s*from\s+(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Only keep the top-level module
lowercase_ : List[str] = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
lowercase_ : Any = list(set(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[Any] = []
for imp in imports:
try:
importlib.import_module(__SCREAMING_SNAKE_CASE )
except ImportError:
missing_packages.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F'''{', '.join(__SCREAMING_SNAKE_CASE )}. Run `pip install {' '.join(__SCREAMING_SNAKE_CASE )}`''' )
return get_relative_imports(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : List[Any] = module_path.replace(os.path.sep , '''.''' )
lowercase_ : Any = importlib.import_module(__SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(__SCREAMING_SNAKE_CASE )
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
from ..pipelines import DiffusionPipeline
lowercase_ : int = dict(inspect.getmembers(__SCREAMING_SNAKE_CASE , inspect.isclass ) )
lowercase_ : Optional[Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , __SCREAMING_SNAKE_CASE )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
F''' {loaded_module}.''' )
lowercase_ : List[Any] = cls
return pipeline_class
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , ):
"""simple docstring"""
lowercase_ : Dict = str(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = module_file_or_url
lowercase_ : int = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
lowercase_ : Optional[int] = get_diffusers_versions()
# cut ".dev0"
lowercase_ : List[Any] = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
lowercase_ : List[str] = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
lowercase_ : List[str] = F'''v{revision}'''
elif revision == "main":
lowercase_ : Optional[Any] = revision
else:
raise ValueError(
F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
F''' {', '.join(available_versions + ['main'] )}.''' )
# community pipeline on GitHub
lowercase_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=__SCREAMING_SNAKE_CASE , pipeline=__SCREAMING_SNAKE_CASE )
try:
lowercase_ : Optional[Any] = cached_download(
__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Tuple = '''git'''
lowercase_ : Tuple = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
lowercase_ : str = hf_hub_download(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
lowercase_ : Tuple = check_imports(__SCREAMING_SNAKE_CASE )
# Now we move the module inside our cached dynamic modules.
lowercase_ : str = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = Path(__SCREAMING_SNAKE_CASE ) / full_submodule
if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to detect changes and name the copy after
        # that hash so we only re-copy on modification, but that seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
for module_needed in modules_needed:
lowercase_ : Union[str, Any] = F'''{module_needed}.py'''
shutil.copy(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = use_auth_token
elif use_auth_token is True:
lowercase_ : List[Any] = HfFolder.get_token()
else:
lowercase_ : Optional[Any] = None
lowercase_ : Optional[int] = model_info(__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , token=__SCREAMING_SNAKE_CASE ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowercase_ : int = submodule_path / commit_hash
lowercase_ : Tuple = full_submodule + os.path.sep + commit_hash
create_dynamic_module(__SCREAMING_SNAKE_CASE )
if not (submodule_path / module_file).exists():
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
        # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
__SCREAMING_SNAKE_CASE , F'''{module_needed}.py''' , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
"""simple docstring"""
lowercase_ : Optional[Any] = get_cached_module_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return get_class_in_module(__SCREAMING_SNAKE_CASE , final_module.replace('''.py''' , '''''' ) )
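# A hedged usage sketch for the import-scanning helpers above (function names follow the upstream
# diffusers API that the call sites in this file use); the file name and its contents are made up,
# and the lines are left as comments so nothing runs on import.
# >>> with open("my_pipeline.py", "w", encoding="utf-8") as f:
# ...     _ = f.write("import torch\nfrom .pipeline_helpers import load_weights\n")
# >>> get_relative_imports("my_pipeline.py")
# ['pipeline_helpers']
# >>> check_imports("my_pipeline.py")  # raises ImportError if torch is not installed
# ['pipeline_helpers']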
| 93 | 1 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowercase : Union[str, Any] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_lowercase : List[str] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_lowercase : Optional[int] = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def _snake_case ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if rouge_types is None:
lowercase_ : Tuple = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
lowercase_ : Any = rouge_scorer.RougeScorer(rouge_types=__SCREAMING_SNAKE_CASE , use_stemmer=__SCREAMING_SNAKE_CASE )
if use_aggregator:
lowercase_ : Union[str, Any] = scoring.BootstrapAggregator()
else:
lowercase_ : Union[str, Any] = []
for ref, pred in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Any = scorer.score(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if use_aggregator:
aggregator.add_scores(__SCREAMING_SNAKE_CASE )
else:
scores.append(__SCREAMING_SNAKE_CASE )
if use_aggregator:
lowercase_ : Any = aggregator.aggregate()
else:
lowercase_ : List[Any] = {}
for key in scores[0]:
lowercase_ : Optional[int] = [score[key] for score in scores]
return result
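# A brief sketch (comments only) of the non-aggregated path above: with ``use_aggregator=False`` the
# result maps each ROUGE key to one ``Score`` per example instead of a bootstrap ``AggregateScore``.
# >>> rouge = datasets.load_metric("rouge")
# >>> out = rouge.compute(predictions=["hello there"], references=["hello there"], use_aggregator=False)
# >>> out["rouge1"]
# [Score(precision=1.0, recall=1.0, fmeasure=1.0)]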
| 93 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
_lowercase : Union[str, Any] = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
_lowercase : int = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Any = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : str = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : List[str] = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
_lowercase : Tuple = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
_lowercase : Tuple = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[int] = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
_lowercase : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
_lowercase : List[str] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : str = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
_lowercase : Dict = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
_lowercase : List[str] = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
_lowercase : str = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
_lowercase : List[Any] = ""
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
assert ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
lowercase_ : Optional[int] = ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : str = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : int = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : List[Any] = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
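# A short note on what the ``_LazyModule`` wiring above provides (assuming this is the package
# ``__init__`` for ``transformers.models.reformer``): importing the package itself is cheap, and the
# heavy, backend-specific submodules are only materialized when one of their attributes is accessed.
# >>> from transformers.models.reformer import ReformerConfig   # config only, no torch import yet
# >>> from transformers.models.reformer import ReformerModel    # triggers the torch-backed modeling module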
| 93 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase__ ( lowerCamelCase_ ):
def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = eval_examples
lowercase_ : Tuple = post_process_function
def _snake_case ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "eval" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = gen_kwargs.copy()
lowercase_ : List[str] = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase_ : str = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase_ : Dict = gen_kwargs
lowercase_ : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase_ : List[str] = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
lowercase_ : Union[str, Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : Tuple = time.time()
lowercase_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : str = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Any = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
lowercase_ : Optional[Any] = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : List[Any] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
else:
lowercase_ : List[Any] = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(__SCREAMING_SNAKE_CASE )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
return metrics
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = gen_kwargs.copy()
lowercase_ : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
        # Temporarily disable metric computation; we will do it in the loop here.
lowercase_ : Optional[Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = time.time()
lowercase_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : Tuple = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Tuple = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase_ : Any = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''' )
lowercase_ : str = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : Optional[int] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
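# A hedged sketch (comments only) of how the two ``_snake_case`` methods above are meant to be driven;
# they mirror the ``evaluate``/``predict`` API of the parent seq2seq trainer, and every name below
# (trainer subclass, model, args, datasets, post-processing function) is a placeholder.
# >>> trainer = QATrainer(model=model, args=args, eval_examples=raw_eval, post_process_function=post_process)
# >>> metrics = trainer.evaluate(max_length=64, num_beams=4)   # extra kwargs flow through **gen_kwargs into generation
# >>> predictions = trainer.predict(test_dataset, test_examples, metric_key_prefix="test", num_beams=4)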
| 93 | 1 |
'''simple docstring'''
import unittest
import numpy as np
def snake_case_ ( __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray | None = None , ):
"""simple docstring"""
lowercase_ : Dict = np.shape(__SCREAMING_SNAKE_CASE )
lowercase_ : str = np.shape(__SCREAMING_SNAKE_CASE )
lowercase_ : int = np.shape(__SCREAMING_SNAKE_CASE )
if shape_a[0] != shape_b[0]:
lowercase_ : Optional[int] = (
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(__SCREAMING_SNAKE_CASE )
if shape_b[1] != shape_c[1]:
lowercase_ : Optional[int] = (
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(__SCREAMING_SNAKE_CASE )
lowercase_ : int = pseudo_inv
if a_inv is None:
try:
lowercase_ : str = np.linalg.inv(__SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
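# A small worked example (comments only) of the function above: with A = 2*I, B = [[1], [0]] and
# C = [[3]], the complement is C - B^T A^{-1} B = 3 - 0.5 = 2.5.
# >>> a = np.array([[2.0, 0.0], [0.0, 2.0]])
# >>> b = np.array([[1.0], [0.0]])
# >>> c = np.array([[3.0]])
# >>> schur_complement(a, b, c)
# array([[2.5]])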
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : int = np.array([[2, 1], [6, 3]] )
lowercase_ : Tuple = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : int = np.block([[a, b], [b.T, c]] )
lowercase_ : int = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = np.linalg.det(__SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : int = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : List[str] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 93 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase : List[str] = ["text", "image", "audio"]
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
inputs.append(create_inputs(__SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def snake_case_ ( __SCREAMING_SNAKE_CASE : List ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
for output in outputs:
if isinstance(__SCREAMING_SNAKE_CASE , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(__SCREAMING_SNAKE_CASE , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(__SCREAMING_SNAKE_CASE , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
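# A quick illustration (comments only) of the two helpers above; the audio tensor length matches the
# dummy used by ``create_inputs``.
# >>> create_inputs(["text"])
# ['Text input']
# >>> output_types(["a caption", torch.ones(3000)])
# ['text', 'audio']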
@is_tool_test
class lowerCAmelCase__ :
def _snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , __SCREAMING_SNAKE_CASE ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = create_inputs(self.tool.inputs )
lowercase_ : Tuple = self.tool(*__SCREAMING_SNAKE_CASE )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Any = [outputs]
self.assertListEqual(output_types(__SCREAMING_SNAKE_CASE ) , self.tool.outputs )
def _snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = create_inputs(self.tool.inputs )
lowercase_ : int = self.tool(*__SCREAMING_SNAKE_CASE )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(self.tool.outputs ) )
for output, output_type in zip(__SCREAMING_SNAKE_CASE , self.tool.outputs ):
lowercase_ : Optional[int] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : int = []
for _input, input_type in zip(__SCREAMING_SNAKE_CASE , self.tool.inputs ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Optional[Any] = self.tool(*__SCREAMING_SNAKE_CASE )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(self.tool.outputs ) )
| 93 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
_lowercase : List[Any] = list[tuple[int, int]]
_lowercase : Any = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_lowercase : int = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = pos_x
lowercase_ : Tuple = pos_y
lowercase_ : Dict = (pos_y, pos_x)
lowercase_ : Optional[int] = goal_x
lowercase_ : Union[str, Any] = goal_y
lowercase_ : int = parent
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : int = Node(start[1] , start[0] , goal[1] , goal[0] , __SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = [self.start]
lowercase_ : Optional[Any] = False
def _snake_case ( self ):
"""simple docstring"""
while self.node_queue:
lowercase_ : int = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowercase_ : Optional[Any] = True
return self.retrace_path(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = self.get_successors(__SCREAMING_SNAKE_CASE )
for node in successors:
self.node_queue.append(__SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
for action in delta:
lowercase_ : List[str] = parent.pos_x + action[1]
lowercase_ : Optional[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , __SCREAMING_SNAKE_CASE ) )
return successors
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = node
lowercase_ : Tuple = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowercase_ : Any = current_node.parent
path.reverse()
return path
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = BreadthFirstSearch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = BreadthFirstSearch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = False
def _snake_case ( self ):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowercase_ : Optional[int] = self.fwd_bfs.node_queue.pop(0 )
lowercase_ : Tuple = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowercase_ : List[str] = True
return self.retrace_bidirectional_path(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = current_bwd_node
lowercase_ : Union[str, Any] = current_fwd_node
lowercase_ : str = {
self.fwd_bfs: self.fwd_bfs.get_successors(__SCREAMING_SNAKE_CASE ),
self.bwd_bfs: self.bwd_bfs.get_successors(__SCREAMING_SNAKE_CASE ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = self.fwd_bfs.retrace_path(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = self.bwd_bfs.retrace_path(__SCREAMING_SNAKE_CASE )
bwd_path.pop()
bwd_path.reverse()
lowercase_ : Tuple = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
_lowercase : List[str] = (0, 0)
_lowercase : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_lowercase : Dict = time.time()
_lowercase : int = BreadthFirstSearch(init, goal)
_lowercase : Any = bfs.search()
_lowercase : Optional[Any] = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
_lowercase : Union[str, Any] = time.time()
_lowercase : Dict = BidirectionalBreadthFirstSearch(init, goal)
_lowercase : List[Any] = bd_bfs.search()
_lowercase : List[Any] = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 93 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
lowerCAmelCase_ = 42
# setable values
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
@dataclass
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = 42
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase_ = 42
@property
def _snake_case ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , __SCREAMING_SNAKE_CASE = 10_00 , __SCREAMING_SNAKE_CASE = 0.0_001 , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = "linear" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "fixed_small" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "epsilon" , __SCREAMING_SNAKE_CASE = jnp.floataa , ):
"""simple docstring"""
lowercase_ : Dict = dtype
def _snake_case ( self , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if common is None:
lowercase_ : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase_ : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
lowercase_ : List[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
return sample
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = () ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
lowercase_ : int = (jnp.arange(0 , __SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
lowercase_ : List[Any] = state.common.alphas_cumprod[t]
lowercase_ : str = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase_ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase_ : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase_ : int = jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase_ : List[str] = jnp.log(jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase_ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase_ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase_ : Optional[Any] = variance
lowercase_ : Union[str, Any] = state.common.betas[t]
lowercase_ : Union[str, Any] = (predicted_variance + 1) / 2
lowercase_ : Any = frac * max_log + (1 - frac) * min_log
return variance
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , ):
"""simple docstring"""
lowercase_ : Optional[int] = timestep
if key is None:
lowercase_ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase_ , lowercase_ : Optional[Any] = jnp.split(__SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase_ : int = None
# 1. compute alphas, betas
lowercase_ : Any = state.common.alphas_cumprod[t]
lowercase_ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase_ : int = 1 - alpha_prod_t
lowercase_ : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase_ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase_ : Any = model_output
elif self.config.prediction_type == "v_prediction":
lowercase_ : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ''' `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase_ : Optional[Any] = jnp.clip(__SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase_ : Optional[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase_ : str = jax.random.split(__SCREAMING_SNAKE_CASE , num=1 )
lowercase_ : List[Any] = jax.random.normal(__SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , predicted_variance=__SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase_ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase_ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return add_noise_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return get_velocity_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
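# A hedged sketch (comments only) of how this scheduler is typically driven. The class and method
# names below follow the upstream diffusers API (``FlaxDDPMScheduler`` with ``create_state`` /
# ``set_timesteps`` / ``step``), which the obfuscated names above stand in for; ``unet``, ``sample``
# and ``rng`` are placeholders.
# >>> scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# >>> state = scheduler.create_state()
# >>> state = scheduler.set_timesteps(state, num_inference_steps=50)
# >>> for t in state.timesteps:
# ...     noise_pred = unet(sample, t)                                  # predict epsilon for the current sample
# ...     sample, state = scheduler.step(state, noise_pred, t, sample, key=rng, return_dict=False)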
| 93 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : str = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_lowercase : Dict = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_lowercase : Union[str, Any] = {
"ctrl": 2_5_6,
}
_lowercase : List[Any] = {
"Pregnancy": 1_6_8_6_2_9,
"Christianity": 7_6_7_5,
"Explain": 1_0_6_4_2_3,
"Fitness": 6_3_4_4_0,
"Saving": 6_3_1_6_3,
"Ask": 2_7_1_7_1,
"Ass": 9_5_9_8_5,
"Joke": 1_6_3_5_0_9,
"Questions": 4_5_6_2_2,
"Thoughts": 4_9_6_0_5,
"Retail": 5_2_3_4_2,
"Feminism": 1_6_4_3_3_8,
"Writing": 1_1_9_9_2,
"Atheism": 1_9_2_2_6_3,
"Netflix": 4_8_6_1_6,
"Computing": 3_9_6_3_9,
"Opinion": 4_3_2_1_3,
"Alone": 4_4_9_6_7,
"Funny": 5_8_9_1_7,
"Gaming": 4_0_3_5_8,
"Human": 4_0_8_8,
"India": 1_3_3_1,
"Joker": 7_7_1_3_8,
"Diet": 3_6_2_0_6,
"Legal": 1_1_8_5_9,
"Norman": 4_9_3_9,
"Tip": 7_2_6_8_9,
"Weight": 5_2_3_4_3,
"Movies": 4_6_2_7_3,
"Running": 2_3_4_2_5,
"Science": 2_0_9_0,
"Horror": 3_7_7_9_3,
"Confession": 6_0_5_7_2,
"Finance": 1_2_2_5_0,
"Politics": 1_6_3_6_0,
"Scary": 1_9_1_9_8_5,
"Support": 1_2_6_5_4,
"Technologies": 3_2_5_1_6,
"Teenage": 6_6_1_6_0,
"Event": 3_2_7_6_9,
"Learned": 6_7_4_6_0,
"Notion": 1_8_2_7_7_0,
"Wikipedia": 3_7_5_8_3,
"Books": 6_6_6_5,
"Extract": 7_6_0_5_0,
"Confessions": 1_0_2_7_0_1,
"Conspiracy": 7_5_9_3_2,
"Links": 6_3_6_7_4,
"Narcissus": 1_5_0_4_2_5,
"Relationship": 5_4_7_6_6,
"Relationships": 1_3_4_7_9_6,
"Reviews": 4_1_6_7_1,
"News": 4_2_5_6,
"Translation": 2_6_8_2_0,
"multilingual": 1_2_8_4_0_6,
}
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : List[str] = set()
lowercase_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ : List[Any] = char
lowercase_ : Union[str, Any] = set(__SCREAMING_SNAKE_CASE )
return pairs
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = CONTROL_CODES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<unk>" , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(unk_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle:
lowercase_ : int = json.load(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = {v: k for k, v in self.encoder.items()}
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle:
lowercase_ : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1]
lowercase_ : Tuple = [tuple(merge.split() ) for merge in merges]
lowercase_ : Union[str, Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
lowercase_ : str = {}
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.encoder )
def _snake_case ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase_ : Tuple = tuple(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowercase_ : Any = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
lowercase_ : Optional[Any] = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ : str = bigram
lowercase_ : List[str] = []
lowercase_ : Tuple = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
lowercase_ : int = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ : int = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ : List[Any] = tuple(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
lowercase_ : Tuple = get_pairs(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = '''@@ '''.join(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = word[:-4]
lowercase_ : Union[str, Any] = word
return word
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = []
lowercase_ : str = re.findall(R'''\S+\n?''' , __SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) ) )
return split_tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = ''' '''.join(__SCREAMING_SNAKE_CASE ).replace('''@@ ''' , '''''' ).strip()
return out_string
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Tuple = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase_ : int = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + '''\n''' )
lowercase_ : Dict = 0
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowercase_ : str = token_index
writer.write(''' '''.join(__SCREAMING_SNAKE_CASE ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 93 |
'''simple docstring'''
_lowercase : int = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = 0
while number:
        # Slightly increases speed by processing five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# Two chains are formed:
# one ends at 89, and declaring its member 58 first minimises the number of
# iterations needed to classify all of the remaining members;
# the other ends at 1 and contains only the single element 1.
# So 58 and 1 are the values declared up front.
# Changed dictionary to an array to quicken the solution
_lowercase : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
_lowercase : List[str] = True
_lowercase : Optional[int] = False
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase_ : Tuple = chain(next_number(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = number_chain
while number < 10000000:
lowercase_ : int = number_chain
number *= 10
return number_chain
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 10000000 ):
"""simple docstring"""
for i in range(1 , __SCREAMING_SNAKE_CASE ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 93 | 1 |
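A small, unoptimised sketch of the square-digit-chain rule that the memoised solution above exploits (every starting value eventually reaches 1 or 89); the helper names are illustrative:

def next_number(number: int) -> int:
    return sum(int(digit) ** 2 for digit in str(number))

def ends_in_89(number: int) -> bool:
    while number not in (1, 89):
        number = next_number(number)
    return number == 89

assert ends_in_89(44) is False  # 44 -> 32 -> 13 -> 10 -> 1
assert ends_in_89(85) is True   # 85 -> 89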
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : str = {"vocab_file": "vocab.txt"}
_lowercase : Optional[int] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
_lowercase : Optional[int] = {
"facebook/esm2_t6_8M_UR50D": 1_0_2_4,
"facebook/esm2_t12_35M_UR50D": 1_0_2_4,
}
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
lowercase_ : Dict = f.read().splitlines()
return [l.strip() for l in lines]
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<cls>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE="<eos>" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = load_vocab_file(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = dict(enumerate(self.all_tokens ) )
lowercase_ : List[str] = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowercase_ : Any = unk_token
lowercase_ : Union[str, Any] = cls_token
lowercase_ : Union[str, Any] = pad_token
lowercase_ : List[str] = mask_token
lowercase_ : Tuple = eos_token
lowercase_ : Any = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self._id_to_token.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self._token_to_id.get(__SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return text.split()
def _snake_case ( self , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
return len(self._id_to_token )
def _snake_case ( self ):
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens )}
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self._token_to_id.get(__SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self._id_to_token.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Dict = [self.cls_token_id]
lowercase_ : List[Any] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase_ : int = [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
if token_ids_a is not None:
mask += [0] * len(__SCREAMING_SNAKE_CASE ) + [1]
return mask
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def _snake_case ( self ):
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
return super()._add_tokens(__SCREAMING_SNAKE_CASE , special_tokens=__SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 1 |
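In the ESM tokenizer above, a single sequence is wrapped as <cls> ... <eos> and the special-tokens mask marks exactly the two added positions; a tiny sketch of that wrapping with made-up token ids (not the real ESM vocabulary):

cls_id, eos_id = 0, 2  # illustrative ids only

def build_inputs(token_ids):
    return [cls_id] + token_ids + [eos_id]

def special_tokens_mask(token_ids):
    return [1] + [0] * len(token_ids) + [1]

ids = [5, 9, 11]
assert build_inputs(ids) == [0, 5, 9, 11, 2]
assert special_tokens_mask(ids) == [1, 0, 0, 0, 1]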
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_lowercase : Any = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = field(default=lowerCamelCase_ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCAmelCase_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCAmelCase_ = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCamelCase_ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = super().to_dict()
for k, v in d.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Any = v.to_dict()
return d
| 93 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : List[str] = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : str = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError('''No input value was provided''' )
lowercase_ : Optional[int] = '''-''' if number.startswith('''-''' ) else ''''''
lowercase_ : Union[str, Any] = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return F'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 | 1 |
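A quick self-check of the divide-by-two recursion above against Python's built-in formatting; the function name is illustrative:

def to_binary(decimal: int) -> str:
    if decimal in (0, 1):  # base case mirrors the snippet above
        return str(decimal)
    quotient, remainder = divmod(decimal, 2)
    return to_binary(quotient) + str(remainder)

for n in (2, 7, 35, 100):
    assert to_binary(n) == bin(n)[2:], n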
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return int((input_a, input_a).count(0 ) != 0 )
def snake_case_ ( ):
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 93 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_lowercase : Any = (7_2_0, 1_2_8_0) # Height, Width
_lowercase : List[Any] = (0.4, 0.6) # if height or width lower than this scale, drop it.
_lowercase : str = 1 / 1_0_0
_lowercase : Any = ""
_lowercase : Union[str, Any] = ""
_lowercase : Optional[int] = ""
_lowercase : List[Any] = 2_5_0
def snake_case_ ( ):
"""simple docstring"""
lowercase_ , lowercase_ : Any = get_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for index in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : str = random.sample(range(len(__SCREAMING_SNAKE_CASE ) ) , 4 )
lowercase_ , lowercase_ , lowercase_ : Any = update_image_and_anno(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , filter_scale=__SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase_ : int = random_chars(32 )
lowercase_ : str = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
lowercase_ : int = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , __SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowercase_ : List[Any] = []
for anno in new_annos:
lowercase_ : List[Any] = anno[3] - anno[1]
lowercase_ : List[str] = anno[4] - anno[2]
lowercase_ : Dict = anno[1] + width / 2
lowercase_ : Dict = anno[2] + height / 2
lowercase_ : int = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(__SCREAMING_SNAKE_CASE )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
lowercase_ : Optional[Any] = []
for label_file in glob.glob(os.path.join(__SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
lowercase_ : int = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(__SCREAMING_SNAKE_CASE ) as in_file:
lowercase_ : List[str] = in_file.readlines()
lowercase_ : Optional[Any] = os.path.join(__SCREAMING_SNAKE_CASE , F'''{label_name}.jpg''' )
lowercase_ : Optional[int] = []
for obj_list in obj_lists:
lowercase_ : List[str] = obj_list.rstrip('''\n''' ).split(''' ''' )
lowercase_ : Optional[int] = float(obj[1] ) - float(obj[3] ) / 2
lowercase_ : Any = float(obj[2] ) - float(obj[4] ) / 2
lowercase_ : str = float(obj[1] ) + float(obj[3] ) / 2
lowercase_ : List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__SCREAMING_SNAKE_CASE )
labels.append(__SCREAMING_SNAKE_CASE )
return img_paths, labels
def snake_case_ ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : tuple[int, int] , __SCREAMING_SNAKE_CASE : tuple[float, float] , __SCREAMING_SNAKE_CASE : float = 0.0 , ):
"""simple docstring"""
lowercase_ : List[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowercase_ : Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : Optional[int] = int(scale_x * output_size[1] )
lowercase_ : Dict = int(scale_y * output_size[0] )
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = []
for i, index in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = all_img_list[index]
path_list.append(__SCREAMING_SNAKE_CASE )
lowercase_ : int = all_annos[index]
lowercase_ : Dict = cva.imread(__SCREAMING_SNAKE_CASE )
if i == 0: # top-left
lowercase_ : Optional[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
lowercase_ : Tuple = img
for bbox in img_annos:
lowercase_ : Optional[int] = bbox[1] * scale_x
lowercase_ : Optional[Any] = bbox[2] * scale_y
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Tuple = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowercase_ : Dict = cva.resize(__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
lowercase_ : Dict = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Dict = bbox[2] * scale_y
lowercase_ : Optional[int] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowercase_ : List[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : Any = bbox[1] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowercase_ : int = cva.resize(
__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Any = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
lowercase_ : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
lowercase_ : Any = ascii_lowercase + digits
return "".join(random.choice(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 93 | 1 |
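In the mosaic routine above, each tile's normalised boxes are rescaled to that tile's share of the output canvas, with an offset for tiles that do not start at the origin; a minimal sketch of the arithmetic for two of the four tiles, with illustrative scale values:

scale_x, scale_y = 0.5, 0.5  # divide point as a fraction of the output canvas

def rescale_top_left(box):
    label, xmin, ymin, xmax, ymax = box
    return [label, xmin * scale_x, ymin * scale_y, xmax * scale_x, ymax * scale_y]

def rescale_top_right(box):
    label, xmin, ymin, xmax, ymax = box
    return [label, scale_x + xmin * (1 - scale_x), ymin * scale_y,
            scale_x + xmax * (1 - scale_x), ymax * scale_y]

print(rescale_top_left([0, 0.2, 0.5, 0.8, 0.9]))   # [0, 0.1, 0.25, 0.4, 0.45]
print(rescale_top_right([0, 0.2, 0.5, 0.8, 0.9]))  # [0, 0.6, 0.25, 0.9, 0.45]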
'''simple docstring'''
from datetime import datetime
import requests
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Any = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
lowercase_ : Dict = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(__SCREAMING_SNAKE_CASE ).content
if __name__ == "__main__":
_lowercase : Optional[Any] = input("Enter Video/IGTV url: ").strip()
_lowercase : Optional[Any] = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 93 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class lowerCAmelCase__ :
def __init__( self ):
"""simple docstring"""
lowercase_ : int = {}
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = {}
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__SCREAMING_SNAKE_CASE )
if nodea not in self.connections:
self.add_node(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = probability
def _snake_case ( self ):
"""simple docstring"""
return list(self.connections )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = 0
lowercase_ : Tuple = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : list[tuple[str, str, float]] , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : List[Any] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = Counter(graph.get_nodes() )
lowercase_ : Any = start
for _ in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : int = graph.transition(__SCREAMING_SNAKE_CASE )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 | 1 |
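The transition method above draws the next node by accumulating edge probabilities until they exceed a uniform random value; a self-contained sketch with a toy two-node graph (outgoing probabilities assumed to sum to 1 per node):

from random import random

connections = {"a": {"a": 0.9, "b": 0.1}, "b": {"a": 0.15, "b": 0.85}}

def transition(node: str) -> str:
    threshold = random()
    cumulative = 0.0
    for dest, prob in connections[node].items():
        cumulative += prob
        if cumulative > threshold:
            return dest
    return node  # only reachable through floating-point rounding

print(transition("a"))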
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = ['''vqvae''']
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , mel=__SCREAMING_SNAKE_CASE , vqvae=__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
return 50 if isinstance(self.scheduler , __SCREAMING_SNAKE_CASE ) else 10_00
@torch.no_grad()
def __call__( self , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=True , ):
"""simple docstring"""
lowercase_ : str = steps or self.get_default_steps()
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowercase_ : str = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowercase_ : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__SCREAMING_SNAKE_CASE , device=self.device , )
lowercase_ : str = noise
lowercase_ : Any = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : int = self.mel.audio_slice_to_image(__SCREAMING_SNAKE_CASE )
lowercase_ : int = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
lowercase_ : Dict = (input_image / 2_55) * 2 - 1
lowercase_ : Optional[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowercase_ : Optional[int] = self.vqvae.encode(torch.unsqueeze(__SCREAMING_SNAKE_CASE , 0 ) ).latent_dist.sample(
generator=__SCREAMING_SNAKE_CASE )[0]
lowercase_ : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowercase_ : str = self.scheduler.add_noise(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.scheduler.timesteps[start_step - 1] )
lowercase_ : str = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowercase_ : Union[str, Any] = int(mask_start_secs * pixels_per_second )
lowercase_ : Union[str, Any] = int(mask_end_secs * pixels_per_second )
lowercase_ : List[Any] = self.scheduler.add_noise(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __SCREAMING_SNAKE_CASE ):
lowercase_ : Any = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )['''sample''']
else:
lowercase_ : int = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )['''sample''']
if isinstance(self.scheduler , __SCREAMING_SNAKE_CASE ):
lowercase_ : str = self.scheduler.step(
model_output=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , sample=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )['''prev_sample''']
else:
lowercase_ : Tuple = self.scheduler.step(
model_output=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , sample=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
lowercase_ : Dict = mask[:, step, :, :mask_start]
if mask_end > 0:
lowercase_ : Tuple = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
lowercase_ : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
lowercase_ : Union[str, Any] = self.vqvae.decode(__SCREAMING_SNAKE_CASE )['''sample''']
lowercase_ : str = (images / 2 + 0.5).clamp(0 , 1 )
lowercase_ : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowercase_ : Any = (images * 2_55).round().astype('''uint8''' )
lowercase_ : Union[str, Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__SCREAMING_SNAKE_CASE , mode='''RGB''' ).convert('''L''' ) for _ in images) )
lowercase_ : Any = [self.mel.image_to_audio(__SCREAMING_SNAKE_CASE ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__SCREAMING_SNAKE_CASE )[:, np.newaxis, :] ) , **ImagePipelineOutput(__SCREAMING_SNAKE_CASE ) )
@torch.no_grad()
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 50 ):
"""simple docstring"""
assert isinstance(self.scheduler , __SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
lowercase_ : Tuple = (sample / 2_55) * 2 - 1
lowercase_ : List[Any] = torch.Tensor(__SCREAMING_SNAKE_CASE ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowercase_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowercase_ : Union[str, Any] = self.scheduler.alphas_cumprod[t]
lowercase_ : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowercase_ : Optional[int] = 1 - alpha_prod_t
lowercase_ : List[Any] = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )['''sample''']
lowercase_ : int = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowercase_ : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowercase_ : List[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = acos(torch.dot(torch.flatten(__SCREAMING_SNAKE_CASE ) , torch.flatten(__SCREAMING_SNAKE_CASE ) ) / torch.norm(__SCREAMING_SNAKE_CASE ) / torch.norm(__SCREAMING_SNAKE_CASE ) )
return sin((1 - alpha) * theta ) * xa / sin(__SCREAMING_SNAKE_CASE ) + sin(alpha * theta ) * xa / sin(__SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
lowercase_ : Tuple = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-0_8 )
lowercase_ : Optional[Any] = torch.nn.Softmax(dim=1 )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = W_supports['''sizes'''].tolist()
lowercase_ : Dict = W_supports['''start_token_id'''].item()
lowercase_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase_ : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : str = None
lowercase_ : Dict = None
lowercase_ : Tuple = W_supports['''input_ids'''] == start_token_id
lowercase_ : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
lowercase_ : List[str] = 0
else:
lowercase_ : List[Any] = support_sizes[i - 1]
lowercase_ : str = S[s : s + size][start_token_masks[s : s + size]]
lowercase_ : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
lowercase_ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase_ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase_ : Tuple = torch.vstack((p_starts, p_start) )
lowercase_ : Optional[Any] = torch.vstack((p_ends, p_end) )
else:
lowercase_ : str = p_start
lowercase_ : int = p_end
return p_starts, p_ends
| 93 | 1 |
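The audio-diffusion pipeline earlier in this section ends with a spherical interpolation (slerp) helper for blending two noise tensors; a minimal NumPy sketch of the same formula, with alpha = 0 returning x0 and alpha = 1 returning x1:

import numpy as np

def slerp(x0, x1, alpha):
    theta = np.arccos(
        np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1))
    )
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)

a = np.array([1.0, 0.0])
b = np.array([0.0, 1.0])
print(slerp(a, b, 0.5))  # roughly [0.7071, 0.7071]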
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_lowercase : Union[str, Any] = logging.get_logger("transformers.models.speecht5")
_lowercase : Optional[Any] = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
_lowercase : List[str] = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
_lowercase : Any = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
_lowercase : Tuple = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
_lowercase : Any = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
_lowercase : int = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
_lowercase : int = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
_lowercase : Any = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
_lowercase : Tuple = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_lowercase : Optional[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowercase : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowercase : List[str] = []
_lowercase : Tuple = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
_lowercase : Optional[int] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
_lowercase : int = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
_lowercase : Tuple = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
for attribute in key.split('''.''' ):
lowercase_ : Tuple = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowercase_ : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).shape
else:
lowercase_ : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase_ : List[Any] = value
elif weight_type == "weight_g":
lowercase_ : Optional[int] = value
elif weight_type == "weight_v":
lowercase_ : Optional[int] = value
elif weight_type == "bias":
lowercase_ : Dict = value
elif weight_type == "running_mean":
lowercase_ : Any = value
elif weight_type == "running_var":
lowercase_ : Optional[int] = value
elif weight_type == "num_batches_tracked":
lowercase_ : Any = value
else:
lowercase_ : Optional[Any] = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowercase_ , lowercase_ : Optional[int] = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Union[str, Any] = []
if task == "s2t":
lowercase_ : str = hf_model.speechta.encoder.prenet.feature_encoder
lowercase_ : List[Any] = MAPPING_S2T
lowercase_ : Optional[int] = IGNORE_KEYS_S2T
elif task == "t2s":
lowercase_ : str = None
lowercase_ : Optional[int] = MAPPING_T2S
lowercase_ : str = IGNORE_KEYS_T2S
elif task == "s2s":
lowercase_ : str = hf_model.speechta.encoder.prenet.feature_encoder
lowercase_ : int = MAPPING_S2S
lowercase_ : Optional[Any] = IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.info(F'''{name} was ignored''' )
continue
lowercase_ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
lowercase_ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
lowercase_ , lowercase_ : Any = key.split('''.*.''' )
if prefix in name and suffix in name:
lowercase_ : List[Any] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
lowercase_ : Any = True
if "*" in mapped_key:
lowercase_ : Optional[int] = name.split(__SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
lowercase_ : Any = mapped_key.replace('''*''' , __SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowercase_ : int = '''weight_g'''
elif "weight_v" in name:
lowercase_ : Optional[int] = '''weight_v'''
elif "bias" in name:
lowercase_ : Tuple = '''bias'''
elif "weight" in name:
lowercase_ : Union[str, Any] = '''weight'''
elif "running_mean" in name:
lowercase_ : str = '''running_mean'''
elif "running_var" in name:
lowercase_ : Optional[Any] = '''running_var'''
elif "num_batches_tracked" in name:
lowercase_ : Tuple = '''num_batches_tracked'''
else:
lowercase_ : int = None
set_recursively(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(__SCREAMING_SNAKE_CASE )
logger.warning(F'''Unused weights: {unused_weights}''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : List[str] = full_name.split('''conv_layers.''' )[-1]
lowercase_ : List[Any] = name.split('''.''' )
lowercase_ : Union[str, Any] = int(items[0] )
lowercase_ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase_ : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase_ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase_ : Optional[int] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase_ : Any = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Tuple=None , ):
"""simple docstring"""
if config_path is not None:
lowercase_ : str = SpeechTaConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Optional[Any] = SpeechTaConfig()
if task == "s2t":
lowercase_ : str = config.max_text_positions
lowercase_ : int = SpeechTaForSpeechToText(__SCREAMING_SNAKE_CASE )
elif task == "t2s":
lowercase_ : Union[str, Any] = 1876
lowercase_ : str = 600
lowercase_ : Union[str, Any] = config.max_speech_positions
lowercase_ : List[Any] = SpeechTaForTextToSpeech(__SCREAMING_SNAKE_CASE )
elif task == "s2s":
lowercase_ : Any = 1876
lowercase_ : Tuple = config.max_speech_positions
lowercase_ : int = SpeechTaForSpeechToSpeech(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
lowercase_ : List[str] = SpeechTaTokenizer(__SCREAMING_SNAKE_CASE , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
lowercase_ : Optional[Any] = AddedToken('''<mask>''' , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
lowercase_ : Optional[int] = SpeechTaFeatureExtractor()
lowercase_ : Optional[Any] = SpeechTaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : str = torch.load(__SCREAMING_SNAKE_CASE )
recursively_load_weights(fairseq_checkpoint['''model'''] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(__SCREAMING_SNAKE_CASE )
model.push_to_hub(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 93 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : List[Any] = "▁"
_lowercase : Tuple = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowercase : List[str] = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
_lowercase : List[str] = {
"facebook/m2m100_418M": 1_0_2_4,
}
# fmt: off
_lowercase : Tuple = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="m2m100" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=8 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ : List[Any] = language_codes
lowercase_ : Optional[int] = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowercase_ : List[Any] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
lowercase_ : Union[str, Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__SCREAMING_SNAKE_CASE )
for lang_code in fairseq_language_code
if self.get_lang_token(__SCREAMING_SNAKE_CASE ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , language_codes=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : int = vocab_file
lowercase_ : Any = load_json(__SCREAMING_SNAKE_CASE )
lowercase_ : str = {v: k for k, v in self.encoder.items()}
lowercase_ : Optional[int] = spm_file
lowercase_ : Any = load_spm(__SCREAMING_SNAKE_CASE , self.sp_model_kwargs )
lowercase_ : List[Any] = len(self.encoder )
lowercase_ : Dict = {
self.get_lang_token(__SCREAMING_SNAKE_CASE ): self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : Optional[int] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_token_to_id.items()}
lowercase_ : Tuple = src_lang if src_lang is not None else '''en'''
lowercase_ : Optional[int] = tgt_lang
lowercase_ : Any = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowercase_ : Dict = num_madeup_words
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder[self.unk_token] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = []
lowercase_ : List[str] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowercase_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = [1] * len(self.prefix_tokens )
lowercase_ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.__dict__.copy()
lowercase_ : List[Any] = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : List[Any] = {}
lowercase_ : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Tuple = Path(__SCREAMING_SNAKE_CASE )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
lowercase_ : Dict = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowercase_ : Dict = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __SCREAMING_SNAKE_CASE )
if os.path.abspath(self.spm_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.spm_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : int = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (str(__SCREAMING_SNAKE_CASE ), str(__SCREAMING_SNAKE_CASE ))
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = src_lang
lowercase_ : List[str] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Tuple = src_lang
lowercase_ : Any = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.get_lang_id(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def set_src_lang_special_tokens ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.lang_token_to_id[lang_token]
lowercase_ : Optional[Any] = [self.cur_lang_id]
lowercase_ : Union[str, Any] = [self.eos_token_id]
def set_tgt_lang_special_tokens ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = self.lang_token_to_id[lang_token]
lowercase_ : str = [self.cur_lang_id]
lowercase_ : List[str] = [self.eos_token_id]
def get_lang_token ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.lang_code_to_token[lang]
def get_lang_id ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.get_lang_token(__SCREAMING_SNAKE_CASE )
return self.lang_token_to_id[lang_token]
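# Module-level helpers used by the tokenizer above: load the SentencePiece model and read/write the JSON vocabulary file.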
def load_spm ( path : str , sp_model_kwargs : Dict[str, Any] ):
    """Load a SentencePiece model from `path`."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json ( path : str ):
    """Read a JSON file and return its content."""
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json ( data , path : str ):
    """Write `data` to `path` as JSON with 2-space indentation."""
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
| 93 | 1 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
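# Conversion utilities: load a T5X/Flax Pix2Struct checkpoint and export the weights and processor in Hugging Face PyTorch format.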
def get_flax_param ( t5x_checkpoint_path ):
    """Load a T5X checkpoint and return its flattened parameter dictionary."""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params ( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : List[Any] = {}
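# Generic key renames (CONVERSION_MAPPING) applied to every flattened Flax parameter name.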
lowercase_ : List[Any] = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
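# Additional renames (DECODER_CONVERSION_MAPPING) applied only to decoder parameters.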
lowercase_ : str = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
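# Rename every flattened "target" key from the Flax checkpoint to the Hugging Face naming scheme.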
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowercase_ : Dict = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowercase_ : List[str] = new_key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowercase_ : int = new_key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowercase_ : List[str] = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowercase_ : Any = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __SCREAMING_SNAKE_CASE )
lowercase_ : str = flax_dict[key]
lowercase_ : List[str] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowercase_ : Tuple = torch.from_numpy(converted_dict[key].T )
else:
lowercase_ : Dict = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
"""simple docstring"""
lowercase_ : Tuple = get_flax_param(__SCREAMING_SNAKE_CASE )
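# Select base or large hyper-parameters for the vision encoder and text decoder configs.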
if not use_large:
lowercase_ : Any = PixaStructVisionConfig()
lowercase_ : Optional[Any] = PixaStructTextConfig()
else:
lowercase_ : Tuple = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowercase_ : int = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowercase_ : Any = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = PixaStructForConditionalGeneration(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = rename_and_convert_flax_params(__SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : str = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowercase_ : List[str] = PixaStructImageProcessor()
lowercase_ : Tuple = PixaStructProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
if use_large:
lowercase_ : str = 4096
lowercase_ : Dict = True
# mkdir if needed
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
print('''Model saved in {}'''.format(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
_lowercase : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 93 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = "▁"
_lowercase : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
_lowercase : Optional[int] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
_lowercase : str = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
_lowercase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
lowercase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
lowercase_ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase_ : Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : str = 1
lowercase_ : str = len(self.sp_model )
lowercase_ : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ : Optional[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase_ : Optional[Any] = src_lang if src_lang is not None else '''en_XX'''
lowercase_ : str = self.lang_code_to_id[self._src_lang]
lowercase_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.__dict__.copy()
lowercase_ : Dict = None
lowercase_ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : Dict = {}
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def vocab_size ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def src_lang ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def src_lang ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = [1] * len(self.prefix_tokens )
lowercase_ : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Optional[Any] = src_lang
lowercase_ : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : Any = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Tuple = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[str] = src_lang
lowercase_ : int = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def set_src_lang_special_tokens ( self , __SCREAMING_SNAKE_CASE ):
"""Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
lowercase_ : Dict = self.lang_code_to_id[src_lang]
lowercase_ : Optional[Any] = []
lowercase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
def set_tgt_lang_special_tokens ( self , __SCREAMING_SNAKE_CASE ):
"""Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
lowercase_ : List[Any] = self.lang_code_to_id[lang]
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
| 93 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph used to run a Markov chain simulation."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        # Pick the next node with probability proportional to the transition weights.
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run a random walk of `steps` transitions starting from `start` and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCAmelCase__ :
lowerCAmelCase_ = None
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : Any = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : str = os.path.join(__SCREAMING_SNAKE_CASE , '''feat_extract.json''' )
feat_extract_first.to_json_file(__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.feature_extraction_class.from_json_file(__SCREAMING_SNAKE_CASE )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Union[str, Any] = feat_extract_first.save_pretrained(__SCREAMING_SNAKE_CASE )[0]
check_json_file_has_correct_format(__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.feature_extraction_class.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.feature_extraction_class()
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : str = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''lxmert'''
lowerCAmelCase_ = {}
def __init__( self , __SCREAMING_SNAKE_CASE=3_05_22 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=95_00 , __SCREAMING_SNAKE_CASE=16_00 , __SCREAMING_SNAKE_CASE=4_00 , __SCREAMING_SNAKE_CASE=30_72 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_12 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-1_2 , __SCREAMING_SNAKE_CASE=9 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=20_48 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=6.67 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[str] = vocab_size
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : List[Any] = hidden_act
lowercase_ : int = intermediate_size
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : Dict = max_position_embeddings
lowercase_ : Union[str, Any] = type_vocab_size
lowercase_ : str = initializer_range
lowercase_ : Optional[int] = layer_norm_eps
lowercase_ : Dict = num_qa_labels
lowercase_ : Tuple = num_object_labels
lowercase_ : Union[str, Any] = num_attr_labels
lowercase_ : Tuple = l_layers
lowercase_ : Any = x_layers
lowercase_ : List[Any] = r_layers
lowercase_ : Any = visual_feat_dim
lowercase_ : Dict = visual_pos_dim
lowercase_ : int = visual_loss_normalizer
lowercase_ : List[Any] = task_matched
lowercase_ : Tuple = task_mask_lm
lowercase_ : Union[str, Any] = task_obj_predict
lowercase_ : Optional[int] = task_qa
lowercase_ : Optional[int] = visual_obj_loss
lowercase_ : Optional[Any] = visual_attr_loss
lowercase_ : Union[str, Any] = visual_feat_loss
lowercase_ : int = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct_text_model'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=5_02_44 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=20_48 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_28 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = vocab_size
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[Any] = d_kv
lowercase_ : List[str] = d_ff
lowercase_ : List[str] = num_layers
lowercase_ : Optional[Any] = num_heads
lowercase_ : Union[str, Any] = relative_attention_num_buckets
lowercase_ : Optional[int] = relative_attention_max_distance
lowercase_ : Union[str, Any] = dropout_rate
lowercase_ : Dict = layer_norm_epsilon
lowercase_ : Dict = initializer_factor
lowercase_ : List[Any] = use_cache
lowercase_ : Optional[int] = eos_token_id
lowercase_ : Optional[int] = decoder_start_token_id
# for backwards compatibility
lowercase_ : Any = dense_act_fn
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Optional[int] = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase_ : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct_vision_model'''
def __init__( self , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=20_48 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=1E-1_0 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=40_96 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_28 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Any = patch_embed_hidden_size
lowercase_ : List[Any] = d_ff
lowercase_ : Dict = dropout_rate
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : int = initializer_range
lowercase_ : Dict = initializer_factor
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : str = dense_act_fn
lowercase_ : Dict = seq_len
lowercase_ : List[Any] = relative_attention_num_buckets
lowercase_ : int = relative_attention_max_distance
lowercase_ : Optional[int] = d_kv
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : str = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase_ : Optional[int] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct'''
lowerCAmelCase_ = True
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text_config is None:
lowercase_ : Optional[Any] = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
lowercase_ : Dict = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
lowercase_ : str = PixaStructTextConfig(**__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = PixaStructVisionConfig(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.text_config.decoder_start_token_id
lowercase_ : Union[str, Any] = self.text_config.pad_token_id
lowercase_ : Union[str, Any] = self.text_config.eos_token_id
lowercase_ : int = initializer_factor
lowercase_ : Any = initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : int = is_vqa
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = copy.deepcopy(self.__dict__ )
lowercase_ : Any = self.text_config.to_dict()
lowercase_ : Optional[Any] = self.vision_config.to_dict()
lowercase_ : Optional[int] = self.__class__.model_type
return output
| 93 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
_lowercase : Union[str, Any] = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
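# Expected ReadMe.to_dict() structure for a correctly formatted README (used as CORRECT_DICT below).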
_lowercase : int = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Any = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : str = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : List[str] = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
_lowercase : Tuple = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
_lowercase : Tuple = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[int] = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
_lowercase : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
_lowercase : List[str] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : str = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
_lowercase : Dict = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
_lowercase : List[str] = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
_lowercase : str = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
_lowercase : List[Any] = ""
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
assert ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
lowercase_ : Optional[int] = ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : str = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : int = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return every prime below `max_number`."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p <= base**degree for distinct primes p < q."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_lowercase : Any = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''upernet'''
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=5_12 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=[1, 2, 3, 6] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.4 , __SCREAMING_SNAKE_CASE=3_84 , __SCREAMING_SNAKE_CASE=2_56 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=2_55 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase_ : List[str] = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = backbone_config.get('''model_type''' )
lowercase_ : Dict = CONFIG_MAPPING[backbone_model_type]
lowercase_ : Tuple = config_class.from_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = backbone_config
lowercase_ : int = hidden_size
lowercase_ : Optional[int] = initializer_range
lowercase_ : Union[str, Any] = pool_scales
lowercase_ : Union[str, Any] = use_auxiliary_head
lowercase_ : Tuple = auxiliary_loss_weight
lowercase_ : Tuple = auxiliary_in_channels
lowercase_ : str = auxiliary_channels
lowercase_ : Any = auxiliary_num_convs
lowercase_ : Tuple = auxiliary_concat_input
lowercase_ : int = loss_ignore_index
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = copy.deepcopy(self.__dict__ )
lowercase_ : Any = self.backbone_config.to_dict()
lowercase_ : Optional[int] = self.__class__.model_type
return output
| 93 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : int = logging.get_logger(__name__)
_lowercase : List[Any] = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase_ = '''nat'''
lowerCAmelCase_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=[3, 4, 6, 5] , __SCREAMING_SNAKE_CASE=[2, 4, 8, 16] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = patch_size
lowercase_ : List[Any] = num_channels
lowercase_ : str = embed_dim
lowercase_ : List[str] = depths
lowercase_ : str = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = num_heads
lowercase_ : int = kernel_size
lowercase_ : Union[str, Any] = mlp_ratio
lowercase_ : Optional[int] = qkv_bias
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : List[Any] = drop_path_rate
lowercase_ : List[Any] = hidden_act
lowercase_ : int = layer_norm_eps
lowercase_ : int = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase_ : Dict = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
lowercase_ : Tuple = layer_scale_init_value
lowercase_ : Union[str, Any] = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
lowercase_ , lowercase_ : int = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 93 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : Any = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''umt5'''
lowerCAmelCase_ = ['''past_key_values''']
def __init__( self , __SCREAMING_SNAKE_CASE=25_01_12 , __SCREAMING_SNAKE_CASE=5_12 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=10_24 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_28 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE="gated-gelu" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="T5Tokenizer" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=__SCREAMING_SNAKE_CASE , tokenizer_class=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : List[str] = vocab_size
lowercase_ : Optional[Any] = d_model
lowercase_ : str = d_kv
lowercase_ : Any = d_ff
lowercase_ : Optional[Any] = num_layers
lowercase_ : Tuple = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase_ : Optional[Any] = num_heads
lowercase_ : int = relative_attention_num_buckets
lowercase_ : Tuple = relative_attention_max_distance
lowercase_ : Union[str, Any] = dropout_rate
lowercase_ : List[str] = layer_norm_epsilon
lowercase_ : str = initializer_factor
lowercase_ : int = feed_forward_proj
lowercase_ : Optional[Any] = use_cache
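# Parse the feed-forward activation spec (e.g. "gated-gelu") into an activation name and a gated flag.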
lowercase_ : Optional[Any] = self.feed_forward_proj.split('''-''' )
lowercase_ : List[str] = act_info[-1]
lowercase_ : List[str] = act_info[0] == '''gated'''
if len(__SCREAMING_SNAKE_CASE ) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
lowercase_ : int = '''gelu_new'''
@property
def _snake_case ( self ):
"""simple docstring"""
return self.d_model
@property
def _snake_case ( self ):
"""simple docstring"""
return self.num_heads
@property
def _snake_case ( self ):
"""simple docstring"""
return self.num_layers
class lowerCAmelCase__ ( lowerCamelCase_ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
lowercase_ : str = '''past_encoder_sequence + sequence'''
lowercase_ : Optional[int] = {0: '''batch'''}
lowercase_ : List[str] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowercase_ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
lowercase_ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _snake_case ( self ):
"""simple docstring"""
return 13
@property
def _snake_case ( self ):
"""simple docstring"""
return 5E-4
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 93 | 1 |
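The `_LazyModule` plumbing above defers heavy imports until an attribute is first touched. A dependency-free sketch of the same idea using PEP 562 module-level `__getattr__` (transformers' `_LazyModule` is more elaborate; the module and attribute names here are illustrative):

import importlib

# map public attribute -> submodule that defines it (illustrative names)
_ATTR_TO_MODULE = {
    "Mask2FormerConfig": ".configuration_mask2former",
    "Mask2FormerModel": ".modeling_mask2former",
}

def __getattr__(name):
    if name in _ATTR_TO_MODULE:
        module = importlib.import_module(_ATTR_TO_MODULE[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")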
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = jnp.ones((batch_size, length) ) / length
return scores
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = None
lowercase_ : List[str] = 20
lowercase_ : Optional[int] = self._get_uniform_logits(batch_size=2 , length=__SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
lowercase_ : List[str] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowercase_ : Optional[int] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowercase_ : Any = jax.nn.softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
lowercase_ : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowercase_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 )
lowercase_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_smoother(__SCREAMING_SNAKE_CASE , scores.copy() , cur_len=__SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = None
lowercase_ : List[str] = 10
lowercase_ : Tuple = 2
# create ramp distribution
lowercase_ : int = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
lowercase_ : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase_ : Optional[int] = FlaxTopKLogitsWarper(3 )
lowercase_ : Tuple = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowercase_ : int = 5
lowercase_ : Optional[int] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowercase_ : Optional[int] = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
lowercase_ : int = top_k_warp_safety_check(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = None
lowercase_ : Optional[int] = 10
lowercase_ : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase_ : int = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowercase_ : List[Any] = FlaxTopPLogitsWarper(0.8 )
lowercase_ : Optional[int] = np.exp(top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase_ : Union[str, Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowercase_ : str = np.broadcast_to(np.arange(__SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase_ : Union[str, Any] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowercase_ : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowercase_ : Optional[Any] = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = 20
lowercase_ : Tuple = 4
lowercase_ : Optional[Any] = 0
lowercase_ : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
lowercase_ : List[Any] = ids_tensor((batch_size, 20) , vocab_size=20 )
lowercase_ : Dict = 5
lowercase_ : List[Any] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowercase_ : Optional[int] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = 15
lowercase_ : List[Any] = min_dist_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = 20
lowercase_ : Tuple = 4
lowercase_ : str = 0
lowercase_ : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
lowercase_ : Optional[int] = ids_tensor((batch_size, 1) , vocab_size=20 )
lowercase_ : str = 1
lowercase_ : Any = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase_ : str = 3
lowercase_ : int = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Dict = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = 20
lowercase_ : int = 4
lowercase_ : Tuple = 0
lowercase_ : Any = 5
lowercase_ : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase_ : str = ids_tensor((batch_size, 4) , vocab_size=20 )
lowercase_ : Optional[int] = 4
lowercase_ : str = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase_ : List[str] = 3
lowercase_ : List[str] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Dict = logits_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(__SCREAMING_SNAKE_CASE ).any() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = 4
lowercase_ : Union[str, Any] = 10
lowercase_ : List[str] = 15
lowercase_ : Tuple = 2
lowercase_ : Tuple = 1
lowercase_ : int = 15
# dummy input_ids and scores
lowercase_ : Dict = ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE )
lowercase_ : str = input_ids.copy()
lowercase_ : Dict = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = scores.copy()
# instantiate all dist processors
lowercase_ : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ : Any = FlaxTopKLogitsWarper(3 )
lowercase_ : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = 10
# no processor list
lowercase_ : Optional[Any] = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : int = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# with processor list
lowercase_ : Any = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase_ : Tuple = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = 4
lowercase_ : Optional[Any] = 10
lowercase_ : List[str] = 15
lowercase_ : int = 2
lowercase_ : Optional[int] = 1
lowercase_ : int = 15
# dummy input_ids and scores
lowercase_ : Optional[int] = ids_tensor((batch_size, sequence_length) , __SCREAMING_SNAKE_CASE )
lowercase_ : Dict = input_ids.copy()
lowercase_ : Optional[Any] = self._get_uniform_logits(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = scores.copy()
# instantiate all dist processors
lowercase_ : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ : Optional[int] = FlaxTopKLogitsWarper(3 )
lowercase_ : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = 10
# no processor list
def run_no_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = temp_dist_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = top_k_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = top_p_warp(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = min_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = bos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = eos_dist_proc(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
return scores
# with processor list
def run_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase_ : Tuple = processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cur_len=__SCREAMING_SNAKE_CASE )
return scores
lowercase_ : List[Any] = jax.jit(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = jax.jit(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = jitted_run_no_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : int = jitted_run_processor_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 93 |
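The temperature test above checks three properties that are easy to reproduce outside JAX. A small NumPy sketch of the same behaviour (the temperature warper simply divides the logits by the temperature before the softmax):

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

logits = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, -2.0]])  # row 0 uniform, row 1 peaked
baseline = softmax(logits)
sharp = softmax(logits / 0.5)   # temperature < 1 sharpens
smooth = softmax(logits / 1.3)  # temperature > 1 flattens
assert np.allclose(sharp[0], smooth[0])      # a uniform row is unchanged
assert sharp[1].max() > baseline[1].max()    # peaks get higher
assert smooth[1].max() < baseline[1].max()   # peaks get lower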
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
profit = [10, 20, 30, 40, 50, 60]
weight = [2, 4, 6, 8, 10, 12]
max_weight = 1_00
self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 2_10 )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(
ValueError , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 93 | 1 |
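The assertions above imply a fractional greedy knapsack: items may be taken partially, and with 100 units of capacity all six items fit, so the profit is 210. A minimal sketch of such a `calc_profit`, assuming strictly positive weights (the real `knapsack.greedy_knapsack` module may differ in details):

def calc_profit(profit, weight, max_weight):
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    # take items in decreasing profit-per-unit-weight order, splitting the last one
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, remaining = 0.0, max_weight
    for p, w in items:
        if remaining <= 0:
            break
        take = min(w, remaining)
        total += p * take / w
        remaining -= take
    return total

assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210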
'''simple docstring'''
def power ( base : int , exponent : int ):
"""simple docstring"""
return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_lowercase : List[Any] = int(input("Enter the base: ").strip())
_lowercase : Any = int(input("Enter the exponent: ").strip())
_lowercase : Union[str, Any] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_lowercase : int = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 93 |
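The recursive power above needs O(exponent) calls. For comparison, a short exponentiation-by-squaring sketch that does the same job in O(log exponent) steps:

def fast_power(base: int, exponent: int) -> float:
    if exponent < 0:
        return 1 / fast_power(base, -exponent)
    result = 1
    while exponent:
        if exponent & 1:      # odd exponent: fold the current base into the result
            result *= base
        base *= base          # square the base, halve the exponent
        exponent >>= 1
    return result

assert fast_power(2, 10) == 1024
assert fast_power(3, 0) == 1
assert fast_power(2, -2) == 0.25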
'''simple docstring'''
import argparse
import copy
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
lowercase_ : List[Any] = {}
with open(__SCREAMING_SNAKE_CASE ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase_ : Union[str, Any] = []
_list.append([line.split()[1], line.split()[2]] )
lowercase_ : str = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase_ : Optional[int] = []
_list.append([line.split()[0], line.split()[2]] )
lowercase_ : Dict = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE ) as f:
lowercase_ : List[str] = f.read(1 )
lowercase_ : Optional[int] = start_node
lowercase_ : Any = []
lowercase_ : List[str] = start_node
lowercase_ : Optional[Any] = 0
while visiting not in first_solution:
lowercase_ : Any = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__SCREAMING_SNAKE_CASE ) and k[0] not in first_solution:
lowercase_ : List[Any] = k[1]
lowercase_ : List[Any] = k[0]
first_solution.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = distance_of_first_solution + int(__SCREAMING_SNAKE_CASE )
lowercase_ : int = best_node
first_solution.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase_ : Optional[Any] = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Tuple = []
for n in solution[1:-1]:
lowercase_ : List[str] = solution.index(__SCREAMING_SNAKE_CASE )
for kn in solution[1:-1]:
lowercase_ : Any = solution.index(__SCREAMING_SNAKE_CASE )
if n == kn:
continue
lowercase_ : Dict = copy.deepcopy(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = kn
lowercase_ : List[Any] = n
lowercase_ : str = 0
for k in _tmp[:-1]:
lowercase_ : Tuple = _tmp[_tmp.index(__SCREAMING_SNAKE_CASE ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase_ : Optional[Any] = distance + int(i[1] )
_tmp.append(__SCREAMING_SNAKE_CASE )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase_ : Union[str, Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __SCREAMING_SNAKE_CASE : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = 1
lowercase_ : List[str] = first_solution
lowercase_ : Dict = []
lowercase_ : List[str] = distance_of_first_solution
lowercase_ : Optional[Any] = solution
while count <= iters:
lowercase_ : int = find_neighborhood(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = 0
lowercase_ : Dict = neighborhood[index_of_best_solution]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) - 1
lowercase_ : Tuple = False
while not found:
lowercase_ : Optional[int] = 0
while i < len(__SCREAMING_SNAKE_CASE ):
if best_solution[i] != solution[i]:
lowercase_ : Tuple = best_solution[i]
lowercase_ : Optional[int] = solution[i]
break
lowercase_ : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase_ : Tuple = True
lowercase_ : Optional[int] = best_solution[:-1]
lowercase_ : Optional[Any] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase_ : Optional[Any] = cost
lowercase_ : int = solution
else:
lowercase_ : Any = index_of_best_solution + 1
lowercase_ : Any = neighborhood[index_of_best_solution]
if len(__SCREAMING_SNAKE_CASE ) >= size:
tabu_list.pop(0 )
lowercase_ : List[Any] = count + 1
return best_solution_ever, best_cost
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str]=None ):
"""simple docstring"""
lowercase_ : Any = generate_neighbours(args.File )
lowercase_ , lowercase_ : Union[str, Any] = generate_first_solution(
args.File , __SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Optional[int] = tabu_search(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 93 | 1 |
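The neighbourhood explored by the tabu search above is generated by exchanging two interior nodes of the current tour. Here is that move in isolation, as a small sketch without the distance bookkeeping:

import copy

def swap_neighbours(solution):
    # all tours obtained by swapping two interior nodes; the endpoints stay fixed
    neighbours = []
    for i in range(1, len(solution) - 1):
        for j in range(i + 1, len(solution) - 1):
            candidate = copy.deepcopy(solution)
            candidate[i], candidate[j] = candidate[j], candidate[i]
            neighbours.append(candidate)
    return neighbours

assert swap_neighbours(["a", "b", "c", "a"]) == [["a", "c", "b", "a"]]
assert len(swap_neighbours(["a", "b", "c", "d", "a"])) == 3  # (b,c), (b,d), (c,d)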
'''simple docstring'''
def snake_case_ ( number : int ):
"""simple docstring"""
if not isinstance(number , int ):
msg = F'''Input value of [number={number}] must be an integer'''
raise TypeError(msg )
if number < 0:
return False
number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
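The digit-matching loop above tests whether number squared ends with the digits of number, i.e. whether the number is automorphic. A compact string-based cross-check, listing the first few automorphic numbers:

def is_automorphic(number: int) -> bool:
    # n is automorphic when n * n ends with the decimal digits of n
    return number >= 0 and str(number * number).endswith(str(number))

assert [n for n in range(100) if is_automorphic(n)] == [0, 1, 5, 6, 25, 76]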
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase_ : int = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
lowercase_ : Union[str, Any] = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
lowercase_ : Union[str, Any] = model(input_ids.to(__SCREAMING_SNAKE_CASE ) , labels=labels.to(__SCREAMING_SNAKE_CASE ) ).loss
lowercase_ : int = -(labels.shape[-1] * loss.item())
lowercase_ : Any = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 93 | 1 |
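The score computed in the test above turns the mean per-token loss back into a total sequence log-likelihood: the loss is the average negative log-likelihood over the label tokens, so multiplying by the label length and negating recovers the summed log-probability. A sketch with hypothetical numbers (the real token count and loss depend on the tokenizer and model):

mean_nll_per_token = 12.13  # hypothetical value of loss.item()
num_label_tokens = 7        # hypothetical labels.shape[-1]
sequence_log_likelihood = -(num_label_tokens * mean_nll_per_token)
assert round(sequence_log_likelihood, 2) == -84.91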
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowercase : Tuple = logging.get_logger(__name__)
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[tf.Tensor, np.ndarray] ):
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
return list(tensor.shape )
lowercase_ : str = tf.shape(__SCREAMING_SNAKE_CASE )
if tensor.shape == tf.TensorShape(__SCREAMING_SNAKE_CASE ):
return dynamic
lowercase_ : Dict = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__SCREAMING_SNAKE_CASE )]
def snake_case_ ( __SCREAMING_SNAKE_CASE : tf.Tensor , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any]=1E-5 , __SCREAMING_SNAKE_CASE : Dict=-1 ):
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
lowercase_ , lowercase_ : Any = tf.nn.moments(__SCREAMING_SNAKE_CASE , axes=[axis] , keepdims=__SCREAMING_SNAKE_CASE )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase_ : Union[str, Any] = [1] * inputs.shape.rank
lowercase_ : Dict = shape_list(__SCREAMING_SNAKE_CASE )[axis]
lowercase_ : List[Any] = tf.reshape(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = tf.reshape(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Compute layer normalization using the batch_normalization
# function.
lowercase_ : List[Any] = tf.nn.batch_normalization(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , offset=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , variance_epsilon=__SCREAMING_SNAKE_CASE , )
return outputs
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : List[str]=-1 ):
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase_ : int = tf.shape(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase_ : List[Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : tf.Tensor ):
"""simple docstring"""
if not isinstance(__SCREAMING_SNAKE_CASE , tf.Tensor ):
lowercase_ : str = tf.convert_to_tensor(__SCREAMING_SNAKE_CASE ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase_ : Optional[int] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase_ : Optional[int] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase_ : Dict = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def snake_case_ ( __SCREAMING_SNAKE_CASE : tf.Tensor , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str = "input_ids" ):
"""simple docstring"""
tf.debugging.assert_less(
__SCREAMING_SNAKE_CASE , tf.cast(__SCREAMING_SNAKE_CASE , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(__SCREAMING_SNAKE_CASE )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : List[str] = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase_ : List[str] = [x for x in data if len(__SCREAMING_SNAKE_CASE ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
lowercase_ : Dict = np.asarray(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = 1
lowercase_ : Optional[Any] = np.array_split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase_ : Optional[int] = np.array_split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = chunk_data
else:
lowercase_ : int = data
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
if name in group.attrs:
lowercase_ : List[Any] = [n.decode('''utf8''' ) if hasattr(__SCREAMING_SNAKE_CASE , '''decode''' ) else n for n in group.attrs[name]]
else:
lowercase_ : Dict = []
lowercase_ : Dict = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(__SCREAMING_SNAKE_CASE , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
def _expand_single_ad_tensor(__SCREAMING_SNAKE_CASE : Any ):
if isinstance(__SCREAMING_SNAKE_CASE , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__SCREAMING_SNAKE_CASE , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , __SCREAMING_SNAKE_CASE )
| 93 |
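The `flatten` helper above collapses the dimensions from `start_dim` through `end_dim` into a single one. The shape arithmetic, checked with NumPy (the TF version does the same thing with dynamic shapes):

import numpy as np

x = np.zeros((2, 3, 4, 5))
start_dim, end_dim = 1, 2
flattened_dim = int(np.prod(x.shape[start_dim:end_dim + 1]))  # 3 * 4 = 12
new_shape = x.shape[:start_dim] + (flattened_dim,) + x.shape[end_dim + 1:]
assert new_shape == (2, 12, 5)
assert x.reshape(new_shape).shape == (2, 12, 5)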
'''simple docstring'''
def naive_pattern_search ( s : str , pattern : str ):
"""simple docstring"""
pat_len = len(pattern )
position = []
for i in range(len(s ) - pat_len + 1 ):
match_found = True
for j in range(pat_len ):
if s[i + j] != pattern[j]:
match_found = False
break
if match_found:
position.append(i )
return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 93 | 1 |
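The naive scanner above can be cross-checked against the standard library: `str.find` with a moving start index reports the same (possibly overlapping) match positions.

def find_all(text: str, pattern: str) -> list:
    positions, start = [], 0
    while (idx := text.find(pattern, start)) != -1:
        positions.append(idx)
        start = idx + 1  # advance by one so overlapping matches are found too
    return positions

assert find_all("ABCDEFG", "DE") == [3]
assert find_all("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]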
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = "▁"
_lowercase : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
_lowercase : Optional[int] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
_lowercase : str = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
_lowercase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
lowercase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
lowercase_ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase_ : Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : str = 1
lowercase_ : str = len(self.sp_model )
lowercase_ : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ : Optional[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase_ : Optional[Any] = src_lang if src_lang is not None else '''en_XX'''
lowercase_ : str = self.lang_code_to_id[self._src_lang]
lowercase_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.__dict__.copy()
lowercase_ : Dict = None
lowercase_ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : Dict = {}
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = [1] * len(self.prefix_tokens )
lowercase_ : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Optional[Any] = src_lang
lowercase_ : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : Any = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Tuple = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[str] = src_lang
lowercase_ : int = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = self.lang_code_to_id[src_lang]
lowercase_ : Optional[Any] = []
lowercase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.lang_code_to_id[lang]
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
| 93 |
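The id bookkeeping in the tokenizer above is the usual fairseq/SentencePiece alignment: the first four ids are reserved for the fairseq specials, every SentencePiece id is shifted by `fairseq_offset`, and the SentencePiece unknown id 0 maps to `<unk>`. In miniature:

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def spm_id_to_fairseq_id(spm_id: int) -> int:
    # SentencePiece returns 0 for unknown pieces, which becomes the fairseq <unk> id
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert spm_id_to_fairseq_id(0) == 3
assert spm_id_to_fairseq_id(7) == 8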
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_lowercase : Optional[Any] = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
_lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Tuple = '''https://pypi.org/pypi/diffusers/json'''
lowercase_ : Tuple = json.loads(request.urlopen(__SCREAMING_SNAKE_CASE ).read() )['''releases'''].keys()
return sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : version.Version(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( ):
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = Path(__SCREAMING_SNAKE_CASE ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] ):
"""simple docstring"""
init_hf_modules()
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : str = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : int = f.read()
# Imports of the form `import .xxx`
lowercase_ : List[Any] = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : int = False
lowercase_ : Any = [module_file]
lowercase_ : Dict = []
# Let's recurse through all relative imports
while not no_change:
lowercase_ : Dict = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = Path(__SCREAMING_SNAKE_CASE ).parent
lowercase_ : Optional[int] = [str(module_path / m ) for m in new_imports]
lowercase_ : str = [f for f in new_import_files if f not in all_relative_imports]
lowercase_ : int = [F'''{f}.py''' for f in new_import_files]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) == 0
all_relative_imports.extend(__SCREAMING_SNAKE_CASE )
return all_relative_imports
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : Union[str, Any] = f.read()
# Imports of the form `import xxx`
lowercase_ : Any = re.findall('''^\s*import\s+(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Only keep the top-level module
lowercase_ : List[str] = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
lowercase_ : Any = list(set(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[Any] = []
for imp in imports:
try:
importlib.import_module(__SCREAMING_SNAKE_CASE )
except ImportError:
missing_packages.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F'''{', '.join(__SCREAMING_SNAKE_CASE )}. Run `pip install {' '.join(__SCREAMING_SNAKE_CASE )}`''' )
return get_relative_imports(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : List[Any] = module_path.replace(os.path.sep , '''.''' )
lowercase_ : Any = importlib.import_module(__SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(__SCREAMING_SNAKE_CASE )
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
from ..pipelines import DiffusionPipeline
lowercase_ : int = dict(inspect.getmembers(__SCREAMING_SNAKE_CASE , inspect.isclass ) )
lowercase_ : Optional[Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , __SCREAMING_SNAKE_CASE )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
F''' {loaded_module}.''' )
lowercase_ : List[Any] = cls
return pipeline_class
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , ):
"""simple docstring"""
lowercase_ : Dict = str(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = module_file_or_url
lowercase_ : int = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
lowercase_ : Optional[int] = get_diffusers_versions()
# cut ".dev0"
lowercase_ : List[Any] = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
lowercase_ : List[str] = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
lowercase_ : List[str] = F'''v{revision}'''
elif revision == "main":
lowercase_ : Optional[Any] = revision
else:
raise ValueError(
F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
F''' {', '.join(available_versions + ['main'] )}.''' )
# community pipeline on GitHub
lowercase_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=__SCREAMING_SNAKE_CASE , pipeline=__SCREAMING_SNAKE_CASE )
try:
lowercase_ : Optional[Any] = cached_download(
__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Tuple = '''git'''
lowercase_ : Tuple = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
lowercase_ : str = hf_hub_download(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
lowercase_ : Tuple = check_imports(__SCREAMING_SNAKE_CASE )
# Now we move the module inside our cached dynamic modules.
lowercase_ : str = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = Path(__SCREAMING_SNAKE_CASE ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
for module_needed in modules_needed:
lowercase_ : Union[str, Any] = F'''{module_needed}.py'''
shutil.copy(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = use_auth_token
elif use_auth_token is True:
lowercase_ : List[Any] = HfFolder.get_token()
else:
lowercase_ : Optional[Any] = None
lowercase_ : Optional[int] = model_info(__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , token=__SCREAMING_SNAKE_CASE ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowercase_ : int = submodule_path / commit_hash
lowercase_ : Tuple = full_submodule + os.path.sep + commit_hash
create_dynamic_module(__SCREAMING_SNAKE_CASE )
if not (submodule_path / module_file).exists():
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
__SCREAMING_SNAKE_CASE , F'''{module_needed}.py''' , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
"""simple docstring"""
lowercase_ : Optional[Any] = get_cached_module_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return get_class_in_module(__SCREAMING_SNAKE_CASE , final_module.replace('''.py''' , '''''' ) )
| 93 | 1 |
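The relative-import scan used by the loader above is just two regular expressions over the module source. A standalone check of what they match:

import re

source = "import .pipeline_utils\nfrom .scheduling_utils import SchedulerMixin\nimport os\n"
relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
assert sorted(set(relative)) == ["pipeline_utils", "scheduling_utils"]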
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowercase_ : str = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE )
lowercase_ : int = [t[-1] for t in os.walk(os.path.join(__SCREAMING_SNAKE_CASE , os.listdir(__SCREAMING_SNAKE_CASE )[0] , '''snapshots''' ) )]
lowercase_ : List[str] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase_ : Tuple = jax.random.PRNGKey(0 )
lowercase_ : Any = 4
lowercase_ : Union[str, Any] = jax.device_count()
lowercase_ : Dict = num_samples * [prompt]
lowercase_ : Tuple = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
lowercase_ : Optional[Any] = replicate(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = shard(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1E-3
assert np.abs(np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49_947.875 ) < 5E-1
lowercase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__SCREAMING_SNAKE_CASE ) == num_samples
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase_ : Any = jax.random.PRNGKey(0 )
lowercase_ : List[str] = 50
lowercase_ : Any = jax.device_count()
lowercase_ : Tuple = num_samples * [prompt]
lowercase_ : str = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
lowercase_ : Optional[Any] = replicate(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Dict = shard(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1E-3
assert np.abs((np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5E-1
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase_ : Optional[int] = jax.random.PRNGKey(0 )
lowercase_ : str = 50
lowercase_ : int = jax.device_count()
lowercase_ : str = num_samples * [prompt]
lowercase_ : List[str] = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
lowercase_ : int = replicate(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : int = shard(__SCREAMING_SNAKE_CASE )
lowercase_ : str = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
lowercase_ : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase_ : int = jax.random.PRNGKey(0 )
lowercase_ : List[Any] = 50
lowercase_ : Tuple = jax.device_count()
lowercase_ : Any = num_samples * [prompt]
lowercase_ : int = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
lowercase_ : Dict = replicate(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = shard(__SCREAMING_SNAKE_CASE )
lowercase_ : int = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , )
lowercase_ , lowercase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , )
lowercase_ : str = scheduler.create_state()
lowercase_ : Optional[int] = scheduler_state
lowercase_ : Optional[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase_ : Tuple = jax.random.PRNGKey(0 )
lowercase_ : int = 50
lowercase_ : str = jax.device_count()
lowercase_ : List[Any] = num_samples * [prompt]
lowercase_ : Tuple = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
# shard inputs and rng
lowercase_ : Tuple = replicate(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = jax.random.split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = shard(__SCREAMING_SNAKE_CASE )
lowercase_ : int = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1E-3
assert np.abs((np.abs(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5E-1
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase_ : List[Any] = jax.device_count()
lowercase_ : List[Any] = num_samples * [prompt]
lowercase_ : Optional[Any] = jax.random.split(jax.random.PRNGKey(0 ) , __SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__SCREAMING_SNAKE_CASE , )
lowercase_ : List[str] = replicate(__SCREAMING_SNAKE_CASE )
lowercase_ : str = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = shard(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
lowercase_ : Union[str, Any] = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
lowercase_ , lowercase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__SCREAMING_SNAKE_CASE , use_memory_efficient_attention=__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = replicate(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = pipeline.prepare_inputs(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = shard(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = pipeline(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , jit=__SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
lowercase_ : Tuple = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 93 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
_lowercase : Union[str, Any] = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
_lowercase : int = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Any = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : str = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : List[str] = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
_lowercase : Tuple = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
_lowercase : Tuple = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[int] = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
_lowercase : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
_lowercase : List[str] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : str = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
_lowercase : Dict = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
_lowercase : List[str] = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
_lowercase : str = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
_lowercase : List[Any] = ""
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
assert ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
lowercase_ : Optional[int] = ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : str = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : int = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_00 , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ):
"""simple docstring"""
lowercase_ : Optional[int] = parent
lowercase_ : Tuple = 1_00
lowercase_ : Any = batch_size
lowercase_ : Union[str, Any] = image_size
lowercase_ : Dict = patch_size
lowercase_ : List[str] = num_channels
lowercase_ : str = is_training
lowercase_ : Any = use_labels
lowercase_ : Optional[int] = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : Union[str, Any] = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : Dict = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : str = scope
lowercase_ : str = out_indices
lowercase_ : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ : Tuple = (image_size // patch_size) ** 2
lowercase_ : int = num_patches + 1
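        # e.g. with the defaults above (image_size=30, patch_size=2): (30 // 2) ** 2 = 225 patches, so seq_length = 226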
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[str] = None
lowercase_ : int = None
if self.use_labels:
lowercase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def _snake_case ( self ):
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = BeitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = BeitForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : int = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.type_sequence_label_size
lowercase_ : List[Any] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ : Optional[int] = 1
lowercase_ : Optional[Any] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Tuple = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = self.num_labels
lowercase_ : List[str] = BeitForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowercase_ : int = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = BeitModelTester(self )
lowercase_ : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Any = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : int = [*signature.parameters.keys()]
lowercase_ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]:
continue
lowercase_ : int = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ : Any = False
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase_ : int = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : Dict = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
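                    # with a zero-init config, weights should land near 0.0 and scale parameters (e.g. LayerNorm) at 1.0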
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Dict = BeitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def _snake_case ( self ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : int = self.default_image_processor
lowercase_ : Optional[int] = prepare_img()
lowercase_ : str = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# prepare bool_masked_pos
lowercase_ : Union[str, Any] = torch.ones((1, 1_96) , dtype=torch.bool ).to(__SCREAMING_SNAKE_CASE )
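        # 196 = (224 // 16) ** 2, i.e. every patch of the patch16 model at 224x224 resolution is masked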
# forward pass
with torch.no_grad():
lowercase_ : str = model(pixel_values=__SCREAMING_SNAKE_CASE , bool_masked_pos=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = outputs.logits
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-2 ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.default_image_processor
lowercase_ : Union[str, Any] = prepare_img()
lowercase_ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = outputs.logits
# verify the logits
lowercase_ : Any = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowercase_ : Any = 2_81
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.default_image_processor
lowercase_ : Any = prepare_img()
lowercase_ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : List[str] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = outputs.logits
# verify the logits
lowercase_ : Union[str, Any] = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowercase_ : str = 23_96
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase_ : Any = model.to(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_40 , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase_ : Any = Image.open(ds[0]['''file'''] )
lowercase_ : Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = outputs.logits
# verify the logits
lowercase_ : Any = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : int = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
lowercase_ : Dict = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=__SCREAMING_SNAKE_CASE , )
else:
lowercase_ : int = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase_ : str = model.to(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_40 , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase_ : Optional[int] = Image.open(ds[0]['''file'''] )
lowercase_ : Union[str, Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits.detach().cpu()
lowercase_ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(5_00, 3_00)] )
lowercase_ : List[Any] = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase__ ( lowerCamelCase_ ):
def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = eval_examples
lowercase_ : Tuple = post_process_function
def _snake_case ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "eval" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = gen_kwargs.copy()
lowercase_ : List[str] = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase_ : str = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase_ : Dict = gen_kwargs
lowercase_ : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase_ : List[str] = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase_ : Union[str, Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : Tuple = time.time()
lowercase_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : str = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Any = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
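            # moving start_time forward excludes the JIT compilation time from the reported speed metrics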
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase_ : Optional[Any] = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : List[Any] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
else:
lowercase_ : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
return metrics
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = gen_kwargs.copy()
lowercase_ : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase_ : Optional[Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = time.time()
lowercase_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : Tuple = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Tuple = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase_ : Any = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''' )
lowercase_ : str = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : Optional[int] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : str = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = ['''pixel_values''']
def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 0.9 , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 1 / 2_55 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = size if size is not None else {'''shortest_edge''': 2_24}
lowercase_ : Tuple = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
lowercase_ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
lowercase_ : Dict = do_resize
lowercase_ : Any = size
lowercase_ : List[str] = crop_pct
lowercase_ : Union[str, Any] = resample
lowercase_ : List[Any] = do_center_crop
lowercase_ : Union[str, Any] = crop_size
lowercase_ : Dict = do_rescale
lowercase_ : Dict = rescale_factor
lowercase_ : str = do_normalize
lowercase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase_ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
lowercase_ : List[str] = int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowercase_ : Dict = int(size['''height'''] / crop_pct )
else:
lowercase_ : int = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(__SCREAMING_SNAKE_CASE ) )
lowercase_ : int = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
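            # e.g. shortest_edge=224 with crop_pct=0.9 resizes the short side to int(224 / 0.9) = 248,
            # leaving room for the separate 224x224 center crop afterwards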
else:
if "shortest_edge" in size:
lowercase_ : Optional[Any] = get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=__SCREAMING_SNAKE_CASE )
elif "height" in size and "width" in size:
lowercase_ : Dict = (size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(__SCREAMING_SNAKE_CASE ) )
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
lowercase_ : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowercase_ : Optional[int] = resample if resample is not None else self.resample
lowercase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Any = image_mean if image_mean is not None else self.image_mean
lowercase_ : str = image_std if image_std is not None else self.image_std
lowercase_ : Optional[int] = size if size is not None else self.size
lowercase_ : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ : Optional[int] = get_size_dict(__SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
lowercase_ : Union[str, Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase_ : Union[str, Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
lowercase_ : Union[str, Any] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , crop_pct=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
lowercase_ : Optional[Any] = [self.center_crop(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase_ : List[str] = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
lowercase_ : Tuple = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
lowercase_ : List[str] = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
lowercase_ : Union[str, Any] = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase : List[str] = ["text", "image", "audio"]
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
inputs.append(create_inputs(__SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def snake_case_ ( __SCREAMING_SNAKE_CASE : List ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
for output in outputs:
if isinstance(__SCREAMING_SNAKE_CASE , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(__SCREAMING_SNAKE_CASE , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(__SCREAMING_SNAKE_CASE , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class lowerCAmelCase__ :
def _snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , __SCREAMING_SNAKE_CASE ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = create_inputs(self.tool.inputs )
lowercase_ : Tuple = self.tool(*__SCREAMING_SNAKE_CASE )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Any = [outputs]
self.assertListEqual(output_types(__SCREAMING_SNAKE_CASE ) , self.tool.outputs )
def _snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = create_inputs(self.tool.inputs )
lowercase_ : int = self.tool(*__SCREAMING_SNAKE_CASE )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(self.tool.outputs ) )
for output, output_type in zip(__SCREAMING_SNAKE_CASE , self.tool.outputs ):
lowercase_ : Optional[int] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : int = []
for _input, input_type in zip(__SCREAMING_SNAKE_CASE , self.tool.inputs ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Optional[Any] = self.tool(*__SCREAMING_SNAKE_CASE )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(self.tool.outputs ) )
| 93 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : int = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''donut-swin'''
lowerCAmelCase_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=2_24 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=96 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 12, 24] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = image_size
lowercase_ : List[str] = patch_size
lowercase_ : Union[str, Any] = num_channels
lowercase_ : List[Any] = embed_dim
lowercase_ : List[str] = depths
lowercase_ : List[str] = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = num_heads
lowercase_ : int = window_size
lowercase_ : List[str] = mlp_ratio
lowercase_ : List[Any] = qkv_bias
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : Optional[Any] = attention_probs_dropout_prob
lowercase_ : Optional[Any] = drop_path_rate
lowercase_ : Tuple = hidden_act
lowercase_ : Any = use_absolute_embeddings
lowercase_ : Optional[int] = layer_norm_eps
lowercase_ : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase_ : str = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
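# Added illustrative note: with the defaults above, embed_dim=96 and depths=[2, 2, 6, 2]
# give 4 stages, so hidden_size = 96 * 2 ** (4 - 1) = 768, i.e. the channel dimension that
# VisionEncoderDecoderModel sees after the last stage.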
| 93 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
lowerCAmelCase_ = 42
# settable values
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
@dataclass
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = 42
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase_ = 42
@property
def _snake_case ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , __SCREAMING_SNAKE_CASE = 10_00 , __SCREAMING_SNAKE_CASE = 0.0_001 , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = "linear" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "fixed_small" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "epsilon" , __SCREAMING_SNAKE_CASE = jnp.floataa , ):
"""simple docstring"""
lowercase_ : Dict = dtype
def _snake_case ( self , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if common is None:
lowercase_ : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase_ : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
lowercase_ : List[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
return sample
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = () ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase_ : int = (jnp.arange(0 , __SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
lowercase_ : List[Any] = state.common.alphas_cumprod[t]
lowercase_ : str = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase_ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase_ : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase_ : int = jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase_ : List[str] = jnp.log(jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase_ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase_ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase_ : Optional[Any] = variance
lowercase_ : Union[str, Any] = state.common.betas[t]
lowercase_ : Union[str, Any] = (predicted_variance + 1) / 2
lowercase_ : Any = frac * max_log + (1 - frac) * min_log
return variance
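# Added numeric sketch (assumed values, for intuition only): if alpha_prod_t = 0.90,
# alpha_prod_t_prev = 0.95 and beta_t = 0.05, the "fixed_small" variance is
# (1 - 0.95) / (1 - 0.90) * 0.05 = 0.025, the clipped posterior variance of Eq. (7) in the DDPM paper.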
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , ):
"""simple docstring"""
lowercase_ : Optional[int] = timestep
if key is None:
lowercase_ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase_ , lowercase_ : Optional[Any] = jnp.split(__SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase_ : int = None
# 1. compute alphas, betas
lowercase_ : Any = state.common.alphas_cumprod[t]
lowercase_ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase_ : int = 1 - alpha_prod_t
lowercase_ : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase_ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase_ : Any = model_output
elif self.config.prediction_type == "v_prediction":
lowercase_ : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
''' or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase_ : Optional[Any] = jnp.clip(__SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase_ : Optional[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase_ : str = jax.random.split(__SCREAMING_SNAKE_CASE , num=1 )
lowercase_ : List[Any] = jax.random.normal(__SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , predicted_variance=__SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase_ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase_ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return add_noise_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return get_velocity_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
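# Minimal usage sketch (added; illustrative only). The mangled method names above appear to
# correspond to create_state / set_timesteps / step in the upstream diffusers Flax API, and the
# argument order follows the signatures shown, but treat this as a sketch rather than a verified snippet:
#
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# for t in state.timesteps:
#     model_output = unet(sample, t) # hypothetical denoiser call
#     out = scheduler.step(state, model_output, t, sample, key=rng)
#     sample, state = out.prev_sample, out.state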
| 93 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowerCAmelCase_ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = AudioClassificationPipeline(model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
# test with a raw waveform
lowercase_ : Tuple = np.zeros((3_40_00,) )
lowercase_ : str = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ , lowercase_ : List[str] = examples
lowercase_ : Tuple = audio_classifier(__SCREAMING_SNAKE_CASE )
# by default a model is initialized with num_labels=2
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
lowercase_ : int = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=1 )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
self.run_torchaudio(__SCREAMING_SNAKE_CASE )
@require_torchaudio
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
import datasets
# test with a local file
lowercase_ : Optional[int] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
lowercase_ : Any = dataset[0]['''audio''']['''array''']
lowercase_ : Tuple = audio_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
@require_torch
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = '''anton-l/wav2vec2-random-tiny-classifier'''
lowercase_ : str = pipeline('''audio-classification''' , model=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = np.ones((80_00,) )
lowercase_ : List[Any] = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=4 )
lowercase_ : Dict = [
{'''score''': 0.0_842, '''label''': '''no'''},
{'''score''': 0.0_838, '''label''': '''up'''},
{'''score''': 0.0_837, '''label''': '''go'''},
{'''score''': 0.0_834, '''label''': '''right'''},
]
lowercase_ : Optional[int] = [
{'''score''': 0.0_845, '''label''': '''stop'''},
{'''score''': 0.0_844, '''label''': '''on'''},
{'''score''': 0.0_841, '''label''': '''right'''},
{'''score''': 0.0_834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
lowercase_ : List[str] = {'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
lowercase_ : Dict = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=4 )
self.assertIn(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _snake_case ( self ):
"""simple docstring"""
import datasets
lowercase_ : Any = '''superb/wav2vec2-base-superb-ks'''
lowercase_ : Optional[Any] = pipeline('''audio-classification''' , model=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
lowercase_ : int = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
lowercase_ : Optional[Any] = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=4 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def _snake_case ( self ):
"""simple docstring"""
pass
| 93 |
'''simple docstring'''
_lowercase : int = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = 0
while number:
# Speed is increased slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
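# Added example: next_number(44) = 16 + 16 = 32, next_number(32) = 13, next_number(13) = 10,
# next_number(10) = 1, so the chain starting at 44 ends at 1; next_number(85) = 64 + 25 = 89,
# so the chain starting at 85 ends at 89.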
# Two chains are possible:
# one ends with 89, and declaring its member 58 first minimises the number of
# iterations needed to check all the remaining members;
# the other ends with 1 and contains only the single element 1.
# So 58 and 1 are the values declared up front.
# A dictionary was replaced with an array to speed up the solution.
_lowercase : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
_lowercase : List[str] = True
_lowercase : Optional[int] = False
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase_ : Tuple = chain(next_number(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = number_chain
while number < 10000000:
lowercase_ : int = number_chain
number *= 10
return number_chain
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 10000000 ):
"""simple docstring"""
for i in range(1 , __SCREAMING_SNAKE_CASE ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 93 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCAmelCase_ = '''CIDAS/clipseg-rd64-refined'''
lowerCAmelCase_ = '''image_segmenter'''
lowerCAmelCase_ = CLIPSegForImageSegmentation
lowerCAmelCase_ = ['''image''', '''text''']
lowerCAmelCase_ = ['''image''']
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
requires_backends(self , ['''vision'''] )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with torch.no_grad():
lowercase_ : Tuple = self.model(**__SCREAMING_SNAKE_CASE ).logits
return logits
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = outputs.cpu().detach().numpy()
lowercase_ : Optional[int] = 0
lowercase_ : List[Any] = 1
return Image.fromarray((array * 2_55).astype(np.uinta ) )
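# Usage sketch (added; illustrative pseudocode, "photo.jpg" is a hypothetical file; the
# positional call is dispatched through PipelineTool.__call__ to encode/forward/decode):
#
# from PIL import Image
# segmenter = <instantiate the tool class defined above>
# mask = segmenter(Image.open("photo.jpg"), "cat") # returns a PIL image with the binary mask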
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests often fail with an OOM error on GPU.
# This makes JAX allocate exactly what is needed on demand and deallocate memory that is no longer needed,
# but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowercase : List[str] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ):
"""simple docstring"""
if attention_mask is None:
lowercase_ : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowercase_ : Any = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowercase_ : Dict = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase_ : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase_ : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0.02 , ):
"""simple docstring"""
lowercase_ : Optional[int] = parent
lowercase_ : Tuple = batch_size
lowercase_ : Tuple = seq_length
lowercase_ : Optional[int] = is_training
lowercase_ : Union[str, Any] = use_labels
lowercase_ : int = vocab_size
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[Any] = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : str = hidden_act
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : Optional[Any] = max_position_embeddings
lowercase_ : Any = eos_token_id
lowercase_ : Optional[int] = pad_token_id
lowercase_ : Dict = bos_token_id
lowercase_ : Optional[int] = initializer_range
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowercase_ : Dict = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowercase_ : Optional[int] = shift_tokens_right(__SCREAMING_SNAKE_CASE , 1 , 2 )
lowercase_ : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[int] = prepare_blenderbot_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return config, inputs_dict
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = 20
lowercase_ : Optional[Any] = model_class_name(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = model.encode(inputs_dict['''input_ids'''] )
lowercase_ , lowercase_ : List[Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowercase_ : int = model.init_cache(decoder_input_ids.shape[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowercase_ : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
lowercase_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase_ : List[str] = model.decode(
decoder_input_ids[:, -1:] , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
lowercase_ : Tuple = model.decode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : int = 20
lowercase_ : Any = model_class_name(__SCREAMING_SNAKE_CASE )
lowercase_ : int = model.encode(inputs_dict['''input_ids'''] )
lowercase_ , lowercase_ : Tuple = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowercase_ : List[str] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowercase_ : Any = model.init_cache(decoder_input_ids.shape[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase_ : List[Any] = model.decode(
decoder_input_ids[:, :-1] , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
lowercase_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase_ : Dict = model.decode(
decoder_input_ids[:, -1:] , __SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__SCREAMING_SNAKE_CASE , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
lowercase_ : int = model.decode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = 9_9
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowercase_ : str = input_ids.shape[0]
lowercase_ : Tuple = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ : List[Any] = self._get_config_and_data()
lowercase_ : List[Any] = FlaxBlenderbotForConditionalGeneration(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = lm_model(input_ids=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowercase_ : Tuple = FlaxBlenderbotForConditionalGeneration(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
lowercase_ : Optional[int] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
lowercase_ : Dict = lm_model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
lowercase_ : List[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , 1 , 2 )
lowercase_ : Optional[int] = np.equal(__SCREAMING_SNAKE_CASE , 1 ).astype(np.floataa ).sum()
lowercase_ : Tuple = np.equal(__SCREAMING_SNAKE_CASE , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(__SCREAMING_SNAKE_CASE , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase , lowerCamelCase_ ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = FlaxBlenderbotModelTester(self )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase_ : Tuple = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
@jax.jit
def encode_jitted(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
return model.encode(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
with self.subTest('''JIT Enabled''' ):
lowercase_ : Tuple = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase_ : str = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase_ : Dict = model_class(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowercase_ : Optional[int] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return model.decode(
decoder_input_ids=__SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , encoder_outputs=__SCREAMING_SNAKE_CASE , )
with self.subTest('''JIT Enabled''' ):
lowercase_ : List[str] = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase_ : List[str] = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase_ : List[Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowercase_ : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
lowercase_ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
lowercase_ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowercase_ : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowercase_ : Any = ['''Sam''']
lowercase_ : Optional[int] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''jax''' )
lowercase_ : List[str] = model.generate(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : str = '''Sam is a great name. It means "sun" in Gaelic.'''
lowercase_ : str = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
assert generated_txt[0].strip() == tgt_text
| 93 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : List[str] = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
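# Added worked trace: binary_recursive(9) -> divmod(9, 2) = (4, 1) -> binary_recursive(4) + "1"
# -> ("1" + "0" + "0") + "1" = "1001", so the wrapper below returns "0b1001" for "9"
# and "-0b1001" for "-9".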
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : str = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError('''No input value was provided''' )
lowercase_ : Optional[int] = '''-''' if number.startswith('''-''' ) else ''''''
lowercase_ : Union[str, Any] = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return F'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 | 1 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case_ ( __SCREAMING_SNAKE_CASE : bytes , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Any = F'''{sampling_rate}'''
lowercase_ : Union[str, Any] = '''1'''
lowercase_ : Optional[Any] = '''f32le'''
lowercase_ : Optional[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase_ : List[str] = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
lowercase_ : int = output_stream[0]
lowercase_ : Dict = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
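# Usage sketch (added; assumes ffmpeg is on PATH and "sample.mp3" is a hypothetical file):
#
# with open("sample.mp3", "rb") as f:
#     audio = <the function above>(f.read(), 16000) # 1-D float32 mono numpy array at 16 kHz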
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : str = "f32le" , ):
"""simple docstring"""
lowercase_ : str = F'''{sampling_rate}'''
lowercase_ : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
lowercase_ : Any = 2
elif format_for_conversion == "f32le":
lowercase_ : Optional[int] = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowercase_ : List[Any] = platform.system()
if system == "Linux":
lowercase_ : Optional[int] = '''alsa'''
lowercase_ : Dict = '''default'''
elif system == "Darwin":
lowercase_ : int = '''avfoundation'''
lowercase_ : str = ''':0'''
elif system == "Windows":
lowercase_ : int = '''dshow'''
lowercase_ : Optional[Any] = '''default'''
lowercase_ : Dict = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
lowercase_ : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase_ : int = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[Tuple[float, float], float]] = None , __SCREAMING_SNAKE_CASE : str = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
lowercase_ : List[str] = stream_chunk_s
else:
lowercase_ : Optional[int] = chunk_length_s
lowercase_ : List[Any] = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
lowercase_ : Tuple = np.intaa
lowercase_ : Dict = 2
elif format_for_conversion == "f32le":
lowercase_ : List[Any] = np.floataa
lowercase_ : List[Any] = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowercase_ : List[str] = chunk_length_s / 6
lowercase_ : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
lowercase_ : Tuple = [stride_length_s, stride_length_s]
lowercase_ : str = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase_ : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase_ : Tuple = datetime.datetime.now()
lowercase_ : Dict = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
lowercase_ : int = np.frombuffer(item['''raw'''] , dtype=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
lowercase_ : Optional[Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple[int, int] , __SCREAMING_SNAKE_CASE : bool = False ):
"""simple docstring"""
lowercase_ : Optional[int] = B''''''
lowercase_ , lowercase_ : Dict = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowercase_ : Any = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
lowercase_ : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
lowercase_ : Tuple = (_stride_left, stride_right)
lowercase_ : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
lowercase_ : int = False
yield item
lowercase_ : Optional[Any] = stride_left
lowercase_ : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
lowercase_ : Union[str, Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
lowercase_ : Union[str, Any] = False
yield item
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : str = 2**24 # 16MB
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
lowercase_ : Any = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 93 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_lowercase : Any = (7_2_0, 1_2_8_0) # Height, Width
_lowercase : List[Any] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
_lowercase : str = 1 / 1_0_0
_lowercase : Any = ""
_lowercase : Union[str, Any] = ""
_lowercase : Optional[int] = ""
_lowercase : List[Any] = 2_5_0
def snake_case_ ( ):
"""simple docstring"""
lowercase_ , lowercase_ : Any = get_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for index in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : str = random.sample(range(len(__SCREAMING_SNAKE_CASE ) ) , 4 )
lowercase_ , lowercase_ , lowercase_ : Any = update_image_and_anno(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , filter_scale=__SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase_ : int = random_chars(32 )
lowercase_ : str = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
lowercase_ : int = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , __SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowercase_ : List[Any] = []
for anno in new_annos:
lowercase_ : List[Any] = anno[3] - anno[1]
lowercase_ : List[str] = anno[4] - anno[2]
lowercase_ : Dict = anno[1] + width / 2
lowercase_ : Dict = anno[2] + height / 2
lowercase_ : int = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(__SCREAMING_SNAKE_CASE )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
lowercase_ : Optional[Any] = []
for label_file in glob.glob(os.path.join(__SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
lowercase_ : int = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(__SCREAMING_SNAKE_CASE ) as in_file:
lowercase_ : List[str] = in_file.readlines()
lowercase_ : Optional[Any] = os.path.join(__SCREAMING_SNAKE_CASE , F'''{label_name}.jpg''' )
lowercase_ : Optional[int] = []
for obj_list in obj_lists:
lowercase_ : List[str] = obj_list.rstrip('''\n''' ).split(''' ''' )
lowercase_ : Optional[int] = float(obj[1] ) - float(obj[3] ) / 2
lowercase_ : Any = float(obj[2] ) - float(obj[4] ) / 2
lowercase_ : str = float(obj[1] ) + float(obj[3] ) / 2
lowercase_ : List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__SCREAMING_SNAKE_CASE )
labels.append(__SCREAMING_SNAKE_CASE )
return img_paths, labels
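# Added worked example of the YOLO -> corner conversion above: a label line
# "0 0.5 0.5 0.2 0.4" (class, x_center, y_center, width, height, all relative) becomes
# [0, 0.4, 0.3, 0.6, 0.7], since xmin = 0.5 - 0.2/2, ymin = 0.5 - 0.4/2, xmax = 0.6, ymax = 0.7.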
def snake_case_ ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : tuple[int, int] , __SCREAMING_SNAKE_CASE : tuple[float, float] , __SCREAMING_SNAKE_CASE : float = 0.0 , ):
"""simple docstring"""
lowercase_ : List[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowercase_ : Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : Optional[int] = int(scale_x * output_size[1] )
lowercase_ : Dict = int(scale_y * output_size[0] )
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = []
for i, index in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = all_img_list[index]
path_list.append(__SCREAMING_SNAKE_CASE )
lowercase_ : int = all_annos[index]
lowercase_ : Dict = cva.imread(__SCREAMING_SNAKE_CASE )
if i == 0: # top-left
lowercase_ : Optional[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
lowercase_ : Tuple = img
for bbox in img_annos:
lowercase_ : Optional[int] = bbox[1] * scale_x
lowercase_ : Optional[Any] = bbox[2] * scale_y
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Tuple = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowercase_ : Dict = cva.resize(__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
lowercase_ : Dict = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Dict = bbox[2] * scale_y
lowercase_ : Optional[int] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowercase_ : List[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : Any = bbox[1] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowercase_ : int = cva.resize(
__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Any = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
lowercase_ : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
lowercase_ : Any = ascii_lowercase + digits
return "".join(random.choice(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 93 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : str = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''lilt'''
def __init__( self , __SCREAMING_SNAKE_CASE=3_05_22 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=30_72 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_12 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-1_2 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=10_24 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = vocab_size
lowercase_ : Dict = hidden_size
lowercase_ : int = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : Tuple = hidden_act
lowercase_ : List[str] = intermediate_size
lowercase_ : int = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : Optional[int] = max_position_embeddings
lowercase_ : int = type_vocab_size
lowercase_ : List[str] = initializer_range
lowercase_ : Tuple = layer_norm_eps
lowercase_ : Tuple = position_embedding_type
lowercase_ : Tuple = classifier_dropout
lowercase_ : Any = channel_shrink_ratio
lowercase_ : List[Any] = max_ad_position_embeddings
| 93 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class lowerCAmelCase__ :
def __init__( self ):
"""simple docstring"""
lowercase_ : int = {}
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = {}
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__SCREAMING_SNAKE_CASE )
if nodea not in self.connections:
self.add_node(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = probability
def _snake_case ( self ):
"""simple docstring"""
return list(self.connections )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = 0
lowercase_ : Tuple = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : list[tuple[str, str, float]] , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : List[Any] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = Counter(graph.get_nodes() )
lowercase_ : Any = start
for _ in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : int = graph.transition(__SCREAMING_SNAKE_CASE )
visited[node] += 1
return visited
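# Usage sketch (added; illustrative two-node chain, visit counts are random but should favour "a"):
#
# transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# counts = <the function above>("a", transitions, 1000) # Counter of visits per node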
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 | 1 |
'''simple docstring'''
from __future__ import annotations
import requests
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Any = F'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(__SCREAMING_SNAKE_CASE ).json()
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 10 ):
"""simple docstring"""
lowercase_ : int = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
lowercase_ : Tuple = requests.get(__SCREAMING_SNAKE_CASE ).json()[:max_stories]
return [get_hackernews_story(__SCREAMING_SNAKE_CASE ) for story_id in story_ids]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 10 ):
"""simple docstring"""
lowercase_ : Dict = hackernews_top_stories(__SCREAMING_SNAKE_CASE )
return "\n".join('''* [{title}]({url})'''.format(**__SCREAMING_SNAKE_CASE ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 93 |
'''simple docstring'''
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
lowercase_ : Tuple = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-0_8 )
lowercase_ : Optional[Any] = torch.nn.Softmax(dim=1 )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = W_supports['''sizes'''].tolist()
lowercase_ : Dict = W_supports['''start_token_id'''].item()
lowercase_ : List[Any] = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase_ : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.BERT(**__SCREAMING_SNAKE_CASE )
lowercase_ : str = None
lowercase_ : Dict = None
lowercase_ : Tuple = W_supports['''input_ids'''] == start_token_id
lowercase_ : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
lowercase_ : List[str] = 0
else:
lowercase_ : List[Any] = support_sizes[i - 1]
lowercase_ : str = S[s : s + size][start_token_masks[s : s + size]]
lowercase_ : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
lowercase_ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase_ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase_ : Tuple = torch.vstack((p_starts, p_start) )
lowercase_ : Optional[Any] = torch.vstack((p_ends, p_end) )
else:
lowercase_ : str = p_start
lowercase_ : int = p_end
return p_starts, p_ends
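# Added descriptive note: p_starts / p_ends stack, for each query in the batch, a softmax
# distribution over that query's token positions, scored by the dot product between the query
# token embeddings and the support embeddings taken at the entity start / end marker positions
# (start_token_id / end_token_id).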
| 93 | 1 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_lowercase : int = get_logger(__name__)
class lowerCAmelCase__ :
lowerCAmelCase_ = '''dummy_data'''
lowerCAmelCase_ = '''datasets'''
lowerCAmelCase_ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
lowercase_ : Any = 0
lowercase_ : List[Any] = dataset_name
lowercase_ : int = cache_dir
lowercase_ : str = use_local_dummy_data
lowercase_ : int = config
# download_callbacks take a single url as input
lowercase_ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase_ : Optional[int] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase_ : Optional[Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
lowercase_ : Union[str, Any] = None
lowercase_ : int = None
@property
def _snake_case ( self ):
"""simple docstring"""
if self._dummy_file is None:
lowercase_ : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def _snake_case ( self ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def _snake_case ( self ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase_ : int = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def _snake_case ( self ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _snake_case ( self ):
"""simple docstring"""
if self._bucket_url is None:
lowercase_ : Optional[int] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def _snake_case ( self ):
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase_ : Optional[int] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase_ : int = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return path
def _snake_case ( self ):
"""simple docstring"""
return {}
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Any = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
lowercase_ : Any = single_urls
lowercase_ : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
lowercase_ : Optional[Any] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase_ : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase_ : Dict = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
lowercase_ : Union[str, Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase_ : Any = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase_ : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase_ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
lowercase_ : List[str] = Path(self.dummy_file ).parent
lowercase_ : str = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase_ : List[str] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = Path(__SCREAMING_SNAKE_CASE )
lowercase_ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open('''rb''' )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : int = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : List[Any] = "▁"
_lowercase : Tuple = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowercase : List[str] = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
_lowercase : List[str] = {
"facebook/m2m100_418M": 1_0_2_4,
}
# fmt: off
_lowercase : Tuple = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="m2m100" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=8 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ : List[Any] = language_codes
lowercase_ : Optional[int] = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowercase_ : List[Any] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
lowercase_ : Union[str, Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__SCREAMING_SNAKE_CASE )
for lang_code in fairseq_language_code
if self.get_lang_token(__SCREAMING_SNAKE_CASE ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , language_codes=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : int = vocab_file
lowercase_ : Any = load_json(__SCREAMING_SNAKE_CASE )
lowercase_ : str = {v: k for k, v in self.encoder.items()}
lowercase_ : Optional[int] = spm_file
lowercase_ : Any = load_spm(__SCREAMING_SNAKE_CASE , self.sp_model_kwargs )
lowercase_ : List[Any] = len(self.encoder )
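# language-code tokens (e.g. __en__) are assigned ids immediately after the regular vocabulary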
lowercase_ : Dict = {
self.get_lang_token(__SCREAMING_SNAKE_CASE ): self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : Optional[int] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__SCREAMING_SNAKE_CASE )}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_token_to_id.items()}
lowercase_ : Tuple = src_lang if src_lang is not None else '''en'''
lowercase_ : Optional[int] = tgt_lang
lowercase_ : Any = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowercase_ : Dict = num_madeup_words
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder[self.unk_token] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = []
lowercase_ : List[str] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowercase_ : Optional[Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = [1] * len(self.prefix_tokens )
lowercase_ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.__dict__.copy()
lowercase_ : List[Any] = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : List[Any] = {}
lowercase_ : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Tuple = Path(__SCREAMING_SNAKE_CASE )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
lowercase_ : Dict = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowercase_ : Dict = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __SCREAMING_SNAKE_CASE )
if os.path.abspath(self.spm_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.spm_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : int = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (str(__SCREAMING_SNAKE_CASE ), str(__SCREAMING_SNAKE_CASE ))
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = src_lang
lowercase_ : List[str] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Tuple = src_lang
lowercase_ : Any = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.get_lang_id(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.lang_token_to_id[lang_token]
lowercase_ : Optional[Any] = [self.cur_lang_id]
lowercase_ : Union[str, Any] = [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = self.lang_token_to_id[lang_token]
lowercase_ : str = [self.cur_lang_id]
lowercase_ : List[str] = [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.lang_code_to_token[lang]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.get_lang_token(__SCREAMING_SNAKE_CASE )
return self.lang_token_to_id[lang_token]
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict[str, Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = sentencepiece.SentencePieceProcessor(**__SCREAMING_SNAKE_CASE )
spm.Load(str(__SCREAMING_SNAKE_CASE ) )
return spm
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
return json.load(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , indent=2 )
| 93 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase_ : int = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
lowercase_ : Union[str, Any] = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
lowercase_ : Union[str, Any] = model(input_ids.to(__SCREAMING_SNAKE_CASE ) , labels=labels.to(__SCREAMING_SNAKE_CASE ) ).loss
lowercase_ : int = -(labels.shape[-1] * loss.item())
lowercase_ : Any = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 93 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = "▁"
_lowercase : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
_lowercase : Optional[int] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
_lowercase : str = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
_lowercase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
lowercase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
lowercase_ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase_ : Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : str = 1
lowercase_ : str = len(self.sp_model )
lowercase_ : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
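# the <mask> token id is placed right after the sentencepiece vocab and the language-code tokens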
lowercase_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ : Optional[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase_ : Optional[Any] = src_lang if src_lang is not None else '''en_XX'''
lowercase_ : str = self.lang_code_to_id[self._src_lang]
lowercase_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.__dict__.copy()
lowercase_ : Dict = None
lowercase_ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : Dict = {}
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = [1] * len(self.prefix_tokens )
lowercase_ : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Optional[Any] = src_lang
lowercase_ : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : Any = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : int = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Tuple = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : List[str] = src_lang
lowercase_ : int = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = self.lang_code_to_id[src_lang]
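# legacy MBART format: no prefix tokens; each sequence ends with </s> followed by the source language code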
lowercase_ : Optional[Any] = []
lowercase_ : List[str] = [self.eos_token_id, self.cur_lang_code]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.lang_code_to_id[lang]
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
| 93 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_lowercase : Optional[int] = None
_lowercase : Any = logging.get_logger(__name__)
_lowercase : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_lowercase : Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
_lowercase : int = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
_lowercase : Dict = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = MBartTokenizer
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : List[Any] = vocab_file
lowercase_ : str = False if not self.vocab_file else True
lowercase_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
lowercase_ : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowercase_ : Union[str, Any] = src_lang if src_lang is not None else '''en_XX'''
lowercase_ : int = self.convert_tokens_to_ids(self._src_lang )
lowercase_ : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : List[Any] = [self.sep_token_id]
lowercase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : str = src_lang
lowercase_ : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = tgt_lang_id
return inputs
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en_XX" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro_RO" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = src_lang
lowercase_ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
lowercase_ : str = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = []
lowercase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
lowercase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : str = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
lowercase_ : List[str] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 93 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCAmelCase__ :
lowerCAmelCase_ = None
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ : Any = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : str = os.path.join(__SCREAMING_SNAKE_CASE , '''feat_extract.json''' )
feat_extract_first.to_json_file(__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.feature_extraction_class.from_json_file(__SCREAMING_SNAKE_CASE )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Union[str, Any] = feat_extract_first.save_pretrained(__SCREAMING_SNAKE_CASE )[0]
check_json_file_has_correct_format(__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.feature_extraction_class.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.feature_extraction_class()
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(__SCREAMING_SNAKE_CASE ) for s in shape] )}.npy'''
def _snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
def _snake_case ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=(4, 4, 64, 64) , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
lowercase_ : Any = jnp.bfloataa if fpaa else jnp.floataa
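# load a pre-generated noise tensor from the Hub and cast it to the requested dtype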
lowercase_ : int = jnp.array(load_hf_numpy(self.get_file_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , dtype=__SCREAMING_SNAKE_CASE )
return image
def _snake_case ( self , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="CompVis/stable-diffusion-v1-4" ):
"""simple docstring"""
lowercase_ : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ : Union[str, Any] = '''bf16''' if fpaa else None
lowercase_ , lowercase_ : Dict = FlaxUNetaDConditionModel.from_pretrained(
__SCREAMING_SNAKE_CASE , subfolder='''unet''' , dtype=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE )
return model, params
def _snake_case ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=(4, 77, 7_68) , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
lowercase_ : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ : List[str] = jnp.array(load_hf_numpy(self.get_file_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , dtype=__SCREAMING_SNAKE_CASE )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 10_00, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ , lowercase_ : Tuple = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = self.get_latents(__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.get_encoder_hidden_states(__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = model.apply(
{'''params''': params} , __SCREAMING_SNAKE_CASE , jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.intaa ) , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ).sample
assert sample.shape == latents.shape
lowercase_ : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase_ : Tuple = jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 10_00, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ , lowercase_ : Union[str, Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.get_latents(__SCREAMING_SNAKE_CASE , shape=(4, 4, 96, 96) , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.get_encoder_hidden_states(__SCREAMING_SNAKE_CASE , shape=(4, 77, 10_24) , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = model.apply(
{'''params''': params} , __SCREAMING_SNAKE_CASE , jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.intaa ) , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ).sample
assert sample.shape == latents.shape
lowercase_ : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase_ : List[Any] = jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-2 )
| 93 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct_text_model'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=5_02_44 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=20_48 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_28 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = vocab_size
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[Any] = d_kv
lowercase_ : List[str] = d_ff
lowercase_ : List[str] = num_layers
lowercase_ : Optional[Any] = num_heads
lowercase_ : Union[str, Any] = relative_attention_num_buckets
lowercase_ : Optional[int] = relative_attention_max_distance
lowercase_ : Union[str, Any] = dropout_rate
lowercase_ : Dict = layer_norm_epsilon
lowercase_ : Dict = initializer_factor
lowercase_ : List[Any] = use_cache
lowercase_ : Optional[int] = eos_token_id
lowercase_ : Optional[int] = decoder_start_token_id
# for backwards compatibility
lowercase_ : Any = dense_act_fn
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Optional[int] = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase_ : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct_vision_model'''
def __init__( self , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=20_48 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=1E-1_0 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=40_96 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_28 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Any = patch_embed_hidden_size
lowercase_ : List[Any] = d_ff
lowercase_ : Dict = dropout_rate
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : int = initializer_range
lowercase_ : Dict = initializer_factor
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : str = dense_act_fn
lowercase_ : Dict = seq_len
lowercase_ : List[Any] = relative_attention_num_buckets
lowercase_ : int = relative_attention_max_distance
lowercase_ : Optional[int] = d_kv
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : str = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase_ : Optional[int] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct'''
lowerCAmelCase_ = True
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text_config is None:
lowercase_ : Optional[Any] = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
lowercase_ : Dict = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
lowercase_ : str = PixaStructTextConfig(**__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = PixaStructVisionConfig(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.text_config.decoder_start_token_id
lowercase_ : Union[str, Any] = self.text_config.pad_token_id
lowercase_ : Union[str, Any] = self.text_config.eos_token_id
lowercase_ : int = initializer_factor
lowercase_ : Any = initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : int = is_vqa
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = copy.deepcopy(self.__dict__ )
lowercase_ : Any = self.text_config.to_dict()
lowercase_ : Optional[Any] = self.vision_config.to_dict()
lowercase_ : Optional[int] = self.__class__.model_type
return output
| 93 | 1 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_lowercase : Optional[int] = False
try:
_lowercase : Optional[int] = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = [] ):
"""simple docstring"""
lowercase_ : Tuple = 0
lowercase_ : Dict = choices
lowercase_ : Tuple = prompt
if sys.platform == "win32":
lowercase_ : Union[str, Any] = '''*'''
else:
lowercase_ : Optional[int] = '''➔ '''
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "" ):
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __SCREAMING_SNAKE_CASE )
else:
forceWrite(self.choices[index] , __SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index == self.position:
forceWrite(F''' {self.arrow_char} ''' )
self.write_choice(__SCREAMING_SNAKE_CASE )
else:
forceWrite(F''' {self.choices[index]}''' )
reset_cursor()
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1 ):
"""simple docstring"""
lowercase_ : List[str] = self.position
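# move the highlighted row up or down, redrawing the previously and newly selected lines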
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__SCREAMING_SNAKE_CASE )
move_cursor(__SCREAMING_SNAKE_CASE , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def _snake_case ( self ):
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def _snake_case ( self ):
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def _snake_case ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def _snake_case ( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__SCREAMING_SNAKE_CASE )] for number in range(10 )] )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = int(chr(self.current_selection ) )
lowercase_ : int = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __SCREAMING_SNAKE_CASE )
else:
return
else:
return
def _snake_case ( self , __SCREAMING_SNAKE_CASE = 0 ):
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and confirm with enter''' , '''\n''' )
lowercase_ : List[str] = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__SCREAMING_SNAKE_CASE )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
lowercase_ : Union[str, Any] = int(builtins.input() )
except ValueError:
lowercase_ : List[Any] = default_choice
else:
lowercase_ : Union[str, Any] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(__SCREAMING_SNAKE_CASE , '''\n''' )
return choice
| 93 |
'''simple docstring'''
from math import isqrt, loga
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Any = [True] * max_number
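# sieve of Eratosthenes: mark every multiple of each prime (starting from its square) as composite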
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = False
return [i for i in range(2 , __SCREAMING_SNAKE_CASE ) if is_prime[i]]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 800800 , __SCREAMING_SNAKE_CASE : int = 800800 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = degree * loga(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = int(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = calculate_prime_numbers(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = 0
lowercase_ : List[Any] = 0
lowercase_ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) - 1
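# two-pointer scan over the sorted primes: for each smaller prime p, count the larger primes q with p^q * q^p <= base^degree (compared in log space)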
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93 | 1 |
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCAmelCase__ ( lowerCamelCase_ ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''num_attention_heads''' ) )
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=[1_28, 2_56, 3_84] , __SCREAMING_SNAKE_CASE=[4, 6, 8] , __SCREAMING_SNAKE_CASE=[2, 3, 4] , __SCREAMING_SNAKE_CASE=[16, 16, 16] , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
"""simple docstring"""
lowercase_ : Dict = parent
lowercase_ : int = batch_size
lowercase_ : List[Any] = image_size
lowercase_ : int = num_channels
lowercase_ : Union[str, Any] = kernel_size
lowercase_ : int = stride
lowercase_ : Optional[int] = padding
lowercase_ : List[Any] = hidden_sizes
lowercase_ : Any = num_attention_heads
lowercase_ : Tuple = depths
lowercase_ : int = key_dim
lowercase_ : Optional[Any] = drop_path_rate
lowercase_ : Optional[Any] = patch_size
lowercase_ : Any = attention_ratio
lowercase_ : Union[str, Any] = mlp_ratio
lowercase_ : Optional[int] = initializer_range
lowercase_ : Union[str, Any] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowercase_ : Union[str, Any] = is_training
lowercase_ : Optional[int] = use_labels
lowercase_ : int = num_labels
lowercase_ : List[Any] = initializer_range
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Dict = None
if self.use_labels:
lowercase_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : str = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Dict = LevitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : List[Any] = model(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = (self.image_size, self.image_size)
lowercase_ , lowercase_ : Any = image_size[0], image_size[1]
for _ in range(4 ):
lowercase_ : int = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowercase_ : Any = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : int = self.num_labels
lowercase_ : Dict = LevitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Optional[int] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : List[Any] = config_and_inputs
lowercase_ : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = LevitModelTester(self )
lowercase_ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _snake_case ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self ):
"""simple docstring"""
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Any = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : str = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[Any] = outputs.hidden_states
lowercase_ : Any = len(self.model_tester.depths ) + 1
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
lowercase_ : int = (self.model_tester.image_size, self.model_tester.image_size)
lowercase_ , lowercase_ : List[Any] = image_size[0], image_size[1]
for _ in range(4 ):
lowercase_ : Optional[int] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase_ : List[str] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
lowercase_ : Optional[int] = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__SCREAMING_SNAKE_CASE )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase_ : str = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ : Union[str, Any] = False
lowercase_ : int = True
for model_class in self.all_model_classes:
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase_ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : int = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Union[str, Any] = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
lowercase_ : Any = problem_type['''title''']
lowercase_ : List[Any] = problem_type['''num_labels''']
lowercase_ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : Any = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
lowercase_ : str = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
lowercase_ : Optional[Any] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__SCREAMING_SNAKE_CASE ) as warning_list:
lowercase_ : List[Any] = model(**__SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = LevitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def _snake_case ( self ):
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__SCREAMING_SNAKE_CASE )
lowercase_ : str = self.default_image_processor
lowercase_ : Dict = prepare_img()
lowercase_ : List[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
lowercase_ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 93 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : int = logging.get_logger(__name__)
_lowercase : List[Any] = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase_ = '''nat'''
lowerCAmelCase_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=[3, 4, 6, 5] , __SCREAMING_SNAKE_CASE=[2, 4, 8, 16] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = patch_size
lowercase_ : List[Any] = num_channels
lowercase_ : str = embed_dim
lowercase_ : List[str] = depths
lowercase_ : str = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = num_heads
lowercase_ : int = kernel_size
lowercase_ : Union[str, Any] = mlp_ratio
lowercase_ : Optional[int] = qkv_bias
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : List[Any] = drop_path_rate
lowercase_ : List[Any] = hidden_act
lowercase_ : int = layer_norm_eps
lowercase_ : int = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase_ : Dict = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
lowercase_ : Tuple = layer_scale_init_value
lowercase_ : Union[str, Any] = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
lowercase_ , lowercase_ : int = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 93 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : int = logging.get_logger(__name__)
_lowercase : List[Any] = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase_ = '''nat'''
lowerCAmelCase_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=[3, 4, 6, 5] , __SCREAMING_SNAKE_CASE=[2, 4, 8, 16] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-5 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = patch_size
lowercase_ : List[Any] = num_channels
lowercase_ : str = embed_dim
lowercase_ : List[str] = depths
lowercase_ : str = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = num_heads
lowercase_ : int = kernel_size
lowercase_ : Union[str, Any] = mlp_ratio
lowercase_ : Optional[int] = qkv_bias
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : List[Any] = drop_path_rate
lowercase_ : List[Any] = hidden_act
lowercase_ : int = layer_norm_eps
lowercase_ : int = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase_ : Dict = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
lowercase_ : Tuple = layer_scale_init_value
lowercase_ : Union[str, Any] = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
lowercase_ , lowercase_ : int = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 93 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : int = {"vocab_file": "spiece.model"}
_lowercase : List[str] = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
_lowercase : Optional[int] = {
"t5-small": 5_1_2,
"t5-base": 5_1_2,
"t5-large": 5_1_2,
"t5-3b": 5_1_2,
"t5-11b": 5_1_2,
}
_lowercase : Tuple = "▁"
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE=1_00 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ : str = [F'''<extra_id_{i}>''' for i in range(__SCREAMING_SNAKE_CASE )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowercase_ : List[Any] = len(set(filter(lambda __SCREAMING_SNAKE_CASE : bool('''extra_id''' in str(__SCREAMING_SNAKE_CASE ) ) , __SCREAMING_SNAKE_CASE ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
lowercase_ : Tuple = legacy
lowercase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , extra_ids=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : str = vocab_file
lowercase_ : Union[str, Any] = extra_ids
lowercase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@staticmethod
def _snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowercase_ : int = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , __SCREAMING_SNAKE_CASE , )
return max_model_length
@property
def _snake_case ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def _snake_case ( self ):
"""simple docstring"""
return list(
set(filter(lambda __SCREAMING_SNAKE_CASE : bool(re.search(R'''<extra_id_\d+>''' , __SCREAMING_SNAKE_CASE ) ) is not None , self.additional_special_tokens ) ) )
def _snake_case ( self ):
"""simple docstring"""
return [self._convert_token_to_id(__SCREAMING_SNAKE_CASE ) for token in self.get_sentinel_tokens()]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(__SCREAMING_SNAKE_CASE ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Optional[int] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : int = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return token_ids_a
else:
lowercase_ : str = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE )
return token_ids_a + token_ids_a
def __getstate__( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.__dict__.copy()
lowercase_ : Any = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : Union[str, Any] = {}
lowercase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not self.legacy:
lowercase_ : Any = SPIECE_UNDERLINE + text.replace(__SCREAMING_SNAKE_CASE , ''' ''' )
return super().tokenize(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not self.legacy:
lowercase_ : Optional[int] = text.startswith(__SCREAMING_SNAKE_CASE )
if is_first:
lowercase_ : List[Any] = text[1:]
lowercase_ : Dict = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[int] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token.startswith('''<extra_id_''' ):
lowercase_ : List[str] = re.match(R'''<extra_id_(\d+)>''' , __SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
lowercase_ : List[str] = self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
else:
lowercase_ : Optional[int] = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = []
lowercase_ : str = ''''''
lowercase_ : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowercase_ : List[Any] = True
lowercase_ : Union[str, Any] = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : List[Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 93 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = [10, 20, 30, 40, 50, 60]
lowercase_ : Optional[Any] = [2, 4, 6, 8, 10, 12]
lowercase_ : Union[str, Any] = 1_00
self.assertEqual(kp.calc_profit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , 2_10 )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Weight can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''Profit can not be negative.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(__SCREAMING_SNAKE_CASE , '''max_weight must greater than zero.''' )
def _snake_case ( self ):
"""simple docstring"""
self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 93 | 1 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
_lowercase : Union[str, Any] = ""
_lowercase : int = ""
_lowercase : str = ""
_lowercase : List[str] = ""
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : List[str] = tweepy.OAuthHandler(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
auth.set_access_token(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = tweepy.API(__SCREAMING_SNAKE_CASE )
# initialize a list to hold all the tweepy Tweets
lowercase_ : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowercase_ : str = api.user_timeline(screen_name=__SCREAMING_SNAKE_CASE , count=200 )
# save most recent tweets
alltweets.extend(__SCREAMING_SNAKE_CASE )
# save the id of the oldest tweet less one
lowercase_ : int = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__SCREAMING_SNAKE_CASE ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
lowercase_ : Tuple = api.user_timeline(
screen_name=__SCREAMING_SNAKE_CASE , count=200 , max_id=__SCREAMING_SNAKE_CASE )
# save most recent tweets
alltweets.extend(__SCREAMING_SNAKE_CASE )
# update the id of the oldest tweet less one
lowercase_ : Dict = alltweets[-1].id - 1
print(F'''...{len(__SCREAMING_SNAKE_CASE )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
lowercase_ : str = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
lowercase_ : List[str] = csv.writer(__SCREAMING_SNAKE_CASE )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 93 |
'''simple docstring'''
import argparse
import copy
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
lowercase_ : List[Any] = {}
with open(__SCREAMING_SNAKE_CASE ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase_ : Union[str, Any] = []
_list.append([line.split()[1], line.split()[2]] )
lowercase_ : str = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase_ : Optional[int] = []
_list.append([line.split()[0], line.split()[2]] )
lowercase_ : Dict = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE ) as f:
lowercase_ : List[str] = f.read(1 )
lowercase_ : Optional[int] = start_node
lowercase_ : Any = []
lowercase_ : List[str] = start_node
lowercase_ : Optional[Any] = 0
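# Greedy nearest-neighbour construction: starting from start_node, repeatedly move to the
# cheapest not-yet-visited neighbour until every node has been added to the tour.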
while visiting not in first_solution:
lowercase_ : Any = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__SCREAMING_SNAKE_CASE ) and k[0] not in first_solution:
lowercase_ : List[Any] = k[1]
lowercase_ : List[Any] = k[0]
first_solution.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = distance_of_first_solution + int(__SCREAMING_SNAKE_CASE )
lowercase_ : int = best_node
first_solution.append(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase_ : Optional[Any] = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Tuple = []
for n in solution[1:-1]:
lowercase_ : List[str] = solution.index(__SCREAMING_SNAKE_CASE )
for kn in solution[1:-1]:
lowercase_ : Any = solution.index(__SCREAMING_SNAKE_CASE )
if n == kn:
continue
lowercase_ : Dict = copy.deepcopy(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = kn
lowercase_ : List[Any] = n
lowercase_ : str = 0
for k in _tmp[:-1]:
lowercase_ : Tuple = _tmp[_tmp.index(__SCREAMING_SNAKE_CASE ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase_ : Optional[Any] = distance + int(i[1] )
_tmp.append(__SCREAMING_SNAKE_CASE )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase_ : Union[str, Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __SCREAMING_SNAKE_CASE : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = 1
lowercase_ : List[str] = first_solution
lowercase_ : Dict = []
lowercase_ : List[str] = distance_of_first_solution
lowercase_ : Optional[Any] = solution
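# Main tabu-search loop: scan the swap neighbourhood of the current solution, take the best
# move that is not tabu, record it in the tabu list, and keep the best tour seen so far.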
while count <= iters:
lowercase_ : int = find_neighborhood(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Any = 0
lowercase_ : Dict = neighborhood[index_of_best_solution]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) - 1
lowercase_ : Tuple = False
while not found:
lowercase_ : Optional[int] = 0
while i < len(__SCREAMING_SNAKE_CASE ):
if best_solution[i] != solution[i]:
lowercase_ : Tuple = best_solution[i]
lowercase_ : Optional[int] = solution[i]
break
lowercase_ : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase_ : Tuple = True
lowercase_ : Optional[int] = best_solution[:-1]
lowercase_ : Optional[Any] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase_ : Optional[Any] = cost
lowercase_ : int = solution
else:
lowercase_ : Any = index_of_best_solution + 1
lowercase_ : Any = neighborhood[index_of_best_solution]
if len(__SCREAMING_SNAKE_CASE ) >= size:
tabu_list.pop(0 )
lowercase_ : List[Any] = count + 1
return best_solution_ever, best_cost
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str]=None ):
"""simple docstring"""
lowercase_ : Any = generate_neighbours(args.File )
lowercase_ , lowercase_ : Union[str, Any] = generate_first_solution(
args.File , __SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Optional[int] = tabu_search(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_lowercase : Any = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 93 | 1 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
lowercase_ : List[Any] = set()
# edges = list of graph's edges
lowercase_ : Union[str, Any] = get_edges(__SCREAMING_SNAKE_CASE )
# While there are still elements in the edges list, take an arbitrary edge
# (from_node, to_node), add both of its endpoints to chosen_vertices, and then
# remove all edges adjacent to from_node and to_node
while edges:
lowercase_ , lowercase_ : Tuple = edges.pop()
chosen_vertices.add(__SCREAMING_SNAKE_CASE )
chosen_vertices.add(__SCREAMING_SNAKE_CASE )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(__SCREAMING_SNAKE_CASE )
return chosen_vertices
def snake_case_ ( __SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
lowercase_ : Any = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 93 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase_ : int = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
lowercase_ : Union[str, Any] = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
lowercase_ : Union[str, Any] = model(input_ids.to(__SCREAMING_SNAKE_CASE ) , labels=labels.to(__SCREAMING_SNAKE_CASE ) ).loss
lowercase_ : int = -(labels.shape[-1] * loss.item())
lowercase_ : Any = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 93 | 1 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : List[str] = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = []
for i in range(len(__SCREAMING_SNAKE_CASE ) - pat_len + 1 ):
lowercase_ : Tuple = True
for j in range(__SCREAMING_SNAKE_CASE ):
if s[i + j] != pattern[j]:
lowercase_ : List[str] = False
break
if match_found:
position.append(__SCREAMING_SNAKE_CASE )
return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 93 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : List[str] = len(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = []
for i in range(len(__SCREAMING_SNAKE_CASE ) - pat_len + 1 ):
lowercase_ : Tuple = True
for j in range(__SCREAMING_SNAKE_CASE ):
if s[i + j] != pattern[j]:
lowercase_ : List[str] = False
break
if match_found:
position.append(__SCREAMING_SNAKE_CASE )
return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 93 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
@property
def _snake_case ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = ort.SessionOptions()
lowercase_ : Any = False
return options
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowercase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowercase_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
# using the PNDM scheduler by default
lowercase_ : Any = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = '''A red cat sitting on a park bench'''
lowercase_ : Any = np.random.RandomState(0 )
lowercase_ : Dict = pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
lowercase_ : str = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 93 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_lowercase : Optional[Any] = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
_lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Tuple = '''https://pypi.org/pypi/diffusers/json'''
lowercase_ : Tuple = json.loads(request.urlopen(__SCREAMING_SNAKE_CASE ).read() )['''releases'''].keys()
return sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : version.Version(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( ):
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = Path(__SCREAMING_SNAKE_CASE ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] ):
"""simple docstring"""
init_hf_modules()
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : str = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : int = f.read()
# Imports of the form `import .xxx`
lowercase_ : List[Any] = re.findall(R'''^\s*import\s+\.(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(R'''^\s*from\s+\.(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : int = False
lowercase_ : Any = [module_file]
lowercase_ : Dict = []
# Let's recurse through all relative imports
while not no_change:
lowercase_ : Dict = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = Path(__SCREAMING_SNAKE_CASE ).parent
lowercase_ : Optional[int] = [str(module_path / m ) for m in new_imports]
lowercase_ : str = [f for f in new_import_files if f not in all_relative_imports]
lowercase_ : int = [F'''{f}.py''' for f in new_import_files]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) == 0
all_relative_imports.extend(__SCREAMING_SNAKE_CASE )
return all_relative_imports
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : Union[str, Any] = f.read()
# Imports of the form `import xxx`
lowercase_ : Any = re.findall(R'''^\s*import\s+(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(R'''^\s*from\s+(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Only keep the top-level module
lowercase_ : List[str] = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
lowercase_ : Any = list(set(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[Any] = []
for imp in imports:
try:
importlib.import_module(__SCREAMING_SNAKE_CASE )
except ImportError:
missing_packages.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F'''{', '.join(__SCREAMING_SNAKE_CASE )}. Run `pip install {' '.join(__SCREAMING_SNAKE_CASE )}`''' )
return get_relative_imports(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : List[Any] = module_path.replace(os.path.sep , '''.''' )
lowercase_ : Any = importlib.import_module(__SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(__SCREAMING_SNAKE_CASE )
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
from ..pipelines import DiffusionPipeline
lowercase_ : int = dict(inspect.getmembers(__SCREAMING_SNAKE_CASE , inspect.isclass ) )
lowercase_ : Optional[Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , __SCREAMING_SNAKE_CASE )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
F''' {loaded_module}.''' )
lowercase_ : List[Any] = cls
return pipeline_class
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , ):
"""simple docstring"""
lowercase_ : Dict = str(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = module_file_or_url
lowercase_ : int = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
lowercase_ : Optional[int] = get_diffusers_versions()
# cut ".dev0"
lowercase_ : List[Any] = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
lowercase_ : List[str] = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
lowercase_ : List[str] = F'''v{revision}'''
elif revision == "main":
lowercase_ : Optional[Any] = revision
else:
raise ValueError(
F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
F''' {', '.join(available_versions + ['main'] )}.''' )
# community pipeline on GitHub
lowercase_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=__SCREAMING_SNAKE_CASE , pipeline=__SCREAMING_SNAKE_CASE )
try:
lowercase_ : Optional[Any] = cached_download(
__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Tuple = '''git'''
lowercase_ : Tuple = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
lowercase_ : str = hf_hub_download(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
lowercase_ : Tuple = check_imports(__SCREAMING_SNAKE_CASE )
# Now we move the module inside our cached dynamic modules.
lowercase_ : str = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = Path(__SCREAMING_SNAKE_CASE ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
for module_needed in modules_needed:
lowercase_ : Union[str, Any] = F'''{module_needed}.py'''
shutil.copy(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = use_auth_token
elif use_auth_token is True:
lowercase_ : List[Any] = HfFolder.get_token()
else:
lowercase_ : Optional[Any] = None
lowercase_ : Optional[int] = model_info(__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , token=__SCREAMING_SNAKE_CASE ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowercase_ : int = submodule_path / commit_hash
lowercase_ : Tuple = full_submodule + os.path.sep + commit_hash
create_dynamic_module(__SCREAMING_SNAKE_CASE )
if not (submodule_path / module_file).exists():
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
__SCREAMING_SNAKE_CASE , F'''{module_needed}.py''' , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
"""simple docstring"""
lowercase_ : Optional[Any] = get_cached_module_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return get_class_in_module(__SCREAMING_SNAKE_CASE , final_module.replace('''.py''' , '''''' ) )
| 93 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_lowercase : List[Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
@classmethod
def _snake_case ( cls ):
"""simple docstring"""
lowercase_ : Optional[int] = TOKEN
HfFolder.save_token(__SCREAMING_SNAKE_CASE )
@classmethod
def _snake_case ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowercase_ : Union[str, Any] = FlaxBertModel(__SCREAMING_SNAKE_CASE )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
lowercase_ : Tuple = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
lowercase_ : List[Any] = flatten_dict(unfreeze(model.params ) )
lowercase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ : Optional[int] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
lowercase_ : List[str] = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
lowercase_ : List[str] = flatten_dict(unfreeze(model.params ) )
lowercase_ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowercase_ : Dict = FlaxBertModel(__SCREAMING_SNAKE_CASE )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
lowercase_ : Dict = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowercase_ : int = flatten_dict(unfreeze(model.params ) )
lowercase_ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ : List[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
lowercase_ : Dict = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowercase_ : List[str] = flatten_dict(unfreeze(model.params ) )
lowercase_ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowercase_ : Optional[int] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : int = True
lowercase_ : str = flatten_dict(modela.params )
lowercase_ : List[str] = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
lowercase_ : Any = False
return models_are_equal
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowercase_ : List[str] = FlaxBertModel(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : int = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowercase_ : Optional[Any] = FlaxBertModel(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
lowercase_ : List[str] = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : int = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = '''bert'''
lowercase_ : Union[str, Any] = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = '''bert'''
lowercase_ : Any = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = FlaxBertModel.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 93 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
_lowercase : Union[str, Any] = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
_lowercase : int = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Any = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
_lowercase : str = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : List[str] = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
_lowercase : Tuple = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
_lowercase : Tuple = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[int] = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
_lowercase : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
_lowercase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
_lowercase : int = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
_lowercase : List[str] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : str = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
_lowercase : Dict = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
_lowercase : List[str] = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
_lowercase : str = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
_lowercase : List[Any] = ""
_lowercase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
_lowercase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
_lowercase : Optional[Any] = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
assert ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
lowercase_ : Optional[int] = ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
ReadMe.from_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : str = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
lowercase_ : int = ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Dict = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = expected_error.format(path=__SCREAMING_SNAKE_CASE )
with pytest.raises(__SCREAMING_SNAKE_CASE , match=re.escape(__SCREAMING_SNAKE_CASE ) ):
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / '''README.md'''
with open(__SCREAMING_SNAKE_CASE , '''w+''' ) as readme_file:
readme_file.write(__SCREAMING_SNAKE_CASE )
ReadMe.from_readme(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , suppress_parsing_errors=__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : str = sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : x[0] / x[1] , reverse=__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : List[Any] = [i[0] for i in r], [i[1] for i in r]
lowercase_ : List[str] = list(accumulate(__SCREAMING_SNAKE_CASE ) )
lowercase_ : List[Any] = bisect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase__ ( lowerCamelCase_ ):
def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = eval_examples
lowercase_ : Tuple = post_process_function
def _snake_case ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "eval" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = gen_kwargs.copy()
lowercase_ : List[str] = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase_ : str = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase_ : Dict = gen_kwargs
lowercase_ : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase_ : List[str] = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase_ : Union[str, Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : Tuple = time.time()
lowercase_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : str = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Any = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase_ : Optional[Any] = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : List[Any] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
else:
lowercase_ : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
return metrics
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = gen_kwargs.copy()
lowercase_ : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase_ : Optional[Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = time.time()
lowercase_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : Tuple = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Tuple = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase_ : Any = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''' )
lowercase_ : str = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : Optional[int] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
| 93 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowercase : int = object()
# For specifying empty leaf dict `{}`
_lowercase : List[Any] = object()
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
lowercase_ : List[Any] = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__SCREAMING_SNAKE_CASE ) - len(__SCREAMING_SNAKE_CASE ) + 1 ):
lowercase_ : Dict = [x.match(__SCREAMING_SNAKE_CASE ) for x, y in zip(__SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(__SCREAMING_SNAKE_CASE ):
return True
return False
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
def replace(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] ):
for rule, replacement in rules:
if _match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def snake_case_ ( ):
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , __SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P('''mp''' , __SCREAMING_SNAKE_CASE )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__SCREAMING_SNAKE_CASE , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , __SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__SCREAMING_SNAKE_CASE , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , __SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
lowercase_ : Optional[Any] = _get_partition_rules()
lowercase_ : List[Any] = _replacement_rules(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = {k: _unmatched for k in flatten_dict(__SCREAMING_SNAKE_CASE )}
lowercase_ : Optional[Any] = {k: replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__SCREAMING_SNAKE_CASE ) )
| 93 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase : List[str] = ["text", "image", "audio"]
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
inputs.append(create_inputs(__SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def snake_case_ ( __SCREAMING_SNAKE_CASE : List ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
for output in outputs:
if isinstance(__SCREAMING_SNAKE_CASE , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(__SCREAMING_SNAKE_CASE , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(__SCREAMING_SNAKE_CASE , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class lowerCAmelCase__ :
def _snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , __SCREAMING_SNAKE_CASE ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = create_inputs(self.tool.inputs )
lowercase_ : Tuple = self.tool(*__SCREAMING_SNAKE_CASE )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Any = [outputs]
self.assertListEqual(output_types(__SCREAMING_SNAKE_CASE ) , self.tool.outputs )
def _snake_case ( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = create_inputs(self.tool.inputs )
lowercase_ : int = self.tool(*__SCREAMING_SNAKE_CASE )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(self.tool.outputs ) )
for output, output_type in zip(__SCREAMING_SNAKE_CASE , self.tool.outputs ):
lowercase_ : Optional[int] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : int = []
for _input, input_type in zip(__SCREAMING_SNAKE_CASE , self.tool.inputs ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Optional[Any] = self.tool(*__SCREAMING_SNAKE_CASE )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(self.tool.outputs ) )
| 93 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict=None ):
"""simple docstring"""
lowercase_ : Any = argparse.ArgumentParser(add_help=__SCREAMING_SNAKE_CASE , allow_abbrev=__SCREAMING_SNAKE_CASE )
# The main config parser
lowercase_ : List[Any] = config_command_parser(__SCREAMING_SNAKE_CASE )
# The subparser to add commands to
lowercase_ : int = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' )
# Then add other parsers with the parent parser
default_command_parser(__SCREAMING_SNAKE_CASE , parents=[parent_parser] )
update_command_parser(__SCREAMING_SNAKE_CASE , parents=[parent_parser] )
return config_parser
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : int = get_config_parser()
lowercase_ : List[Any] = config_parser.parse_args()
if not hasattr(__SCREAMING_SNAKE_CASE , '''func''' ):
config_parser.print_help()
exit(1 )
# Run
args.func(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 93 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
lowerCAmelCase_ = 42
# setable values
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
@dataclass
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = 42
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCAmelCase_ = 42
@property
def _snake_case ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , __SCREAMING_SNAKE_CASE = 10_00 , __SCREAMING_SNAKE_CASE = 0.0_001 , __SCREAMING_SNAKE_CASE = 0.02 , __SCREAMING_SNAKE_CASE = "linear" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "fixed_small" , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = "epsilon" , __SCREAMING_SNAKE_CASE = jnp.floataa , ):
"""simple docstring"""
lowercase_ : Dict = dtype
def _snake_case ( self , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if common is None:
lowercase_ : Tuple = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase_ : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
lowercase_ : List[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__SCREAMING_SNAKE_CASE , init_noise_sigma=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
return sample
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = () ):
"""simple docstring"""
lowercase_ : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase_ : int = (jnp.arange(0 , __SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
lowercase_ : List[Any] = state.common.alphas_cumprod[t]
lowercase_ : str = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase_ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase_ : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase_ : int = jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase_ : List[str] = jnp.log(jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase_ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase_ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase_ : Optional[Any] = variance
lowercase_ : Union[str, Any] = state.common.betas[t]
lowercase_ : Union[str, Any] = (predicted_variance + 1) / 2
lowercase_ : Any = frac * max_log + (1 - frac) * min_log
return variance
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , ):
"""simple docstring"""
lowercase_ : Optional[int] = timestep
if key is None:
lowercase_ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase_ , lowercase_ : Optional[Any] = jnp.split(__SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase_ : int = None
# 1. compute alphas, betas
lowercase_ : Any = state.common.alphas_cumprod[t]
lowercase_ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase_ : int = 1 - alpha_prod_t
lowercase_ : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase_ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase_ : Any = model_output
elif self.config.prediction_type == "v_prediction":
lowercase_ : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
''' or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase_ : Optional[Any] = jnp.clip(__SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase_ : Optional[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase_ : str = jax.random.split(__SCREAMING_SNAKE_CASE , num=1 )
lowercase_ : List[Any] = jax.random.normal(__SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , predicted_variance=__SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase_ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase_ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return add_noise_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return get_velocity_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 93 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowerCAmelCase__ ( nn.Module ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = jnp.floataa
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = hidden_states.shape
lowercase_ : Dict = jax.image.resize(
__SCREAMING_SNAKE_CASE , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
lowercase_ : Optional[Any] = self.conv(__SCREAMING_SNAKE_CASE )
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = jnp.floataa
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = self.conv(__SCREAMING_SNAKE_CASE )
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = None
lowerCAmelCase_ = jnp.floataa
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = self.in_channels if self.out_channels is None else self.out_channels
lowercase_ : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowercase_ : int = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase_ : Optional[Any] = nn.Dense(__SCREAMING_SNAKE_CASE , dtype=self.dtype )
lowercase_ : List[str] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowercase_ : str = nn.Dropout(self.dropout_prob )
lowercase_ : Optional[int] = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase_ : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowercase_ : List[Any] = None
if use_nin_shortcut:
lowercase_ : Tuple = nn.Conv(
__SCREAMING_SNAKE_CASE , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
lowercase_ : Union[str, Any] = hidden_states
lowercase_ : List[str] = self.norma(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = nn.swish(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.conva(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.time_emb_proj(nn.swish(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Tuple = jnp.expand_dims(jnp.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , 1 )
lowercase_ : Dict = hidden_states + temb
lowercase_ : Dict = self.norma(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = nn.swish(__SCREAMING_SNAKE_CASE )
lowercase_ : int = self.dropout(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = self.conva(__SCREAMING_SNAKE_CASE )
if self.conv_shortcut is not None:
lowercase_ : Any = self.conv_shortcut(__SCREAMING_SNAKE_CASE )
return hidden_states + residual
| 93 |
'''simple docstring'''
_lowercase : int = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
_lowercase : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
_lowercase : List[str] = True
_lowercase : Optional[int] = False
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase_ : Tuple = chain(next_number(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = number_chain
while number < 10000000:
lowercase_ : int = number_chain
number *= 10
return number_chain
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 10000000 ):
"""simple docstring"""
for i in range(1 , __SCREAMING_SNAKE_CASE ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 93 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowercase : str = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_lowercase : Union[str, Any] = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
_lowercase : Union[str, Any] = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = RobertaTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
lowercase_ : Optional[int] = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
lowercase_ : Dict = add_prefix_space
lowercase_ : Optional[int] = pre_tok_class(**__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = add_prefix_space
lowercase_ : Tuple = '''post_processor'''
lowercase_ : Any = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
lowercase_ : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ : List[str] = tuple(state['''sep'''] )
if "cls" in state:
lowercase_ : List[str] = tuple(state['''cls'''] )
lowercase_ : List[Any] = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
lowercase_ : int = add_prefix_space
lowercase_ : Optional[int] = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
lowercase_ : Tuple = trim_offsets
lowercase_ : int = True
if changes_to_apply:
lowercase_ : Tuple = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
lowercase_ : Optional[Any] = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
def _snake_case ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
lowercase_ : Union[str, Any] = value
def _snake_case ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Optional[Any] = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
lowercase_ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 93 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Optional[int] = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : List[str] = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : str = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError('''No input value was provided''' )
lowercase_ : Optional[int] = '''-''' if number.startswith('''-''' ) else ''''''
lowercase_ : Union[str, Any] = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return F'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 | 1 |