import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if ``fs`` is a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename ``src`` to ``dst``, using a plain move when the filesystem is local."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop and thread references, otherwise HTTPFileSystem can hang in training loops."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
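# Minimal usage sketch for the helpers above ("s3://..." is an illustrative URI;
# the second check assumes an fsspec version where LocalFileSystem.protocol is
# the plain string "file"):
if __name__ == "__main__":
    assert extract_path_from_uri("s3://my-bucket/data") == "my-bucket/data"
    assert not is_remote_filesystem(fsspec.filesystem("file"))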
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab",
    "Acehnese Latin": "ace_Latn",
    "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab",
    "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab",
    "Akan": "aka_Latn",
    "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab",
    "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab",
    "Egyptian Arabic": "arz_Arab",
    "Assamese": "asm_Beng",
    "Asturian": "ast_Latn",
    "Awadhi": "awa_Deva",
    "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn",
    "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn",
    "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl",
    "Bemba": "bem_Latn",
    "Bengali": "ben_Beng",
    "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn",
    "Standard Tibetan": "bod_Tibt",
    "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn",
    "Bulgarian": "bul_Cyrl",
    "Catalan": "cat_Latn",
    "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn",
    "Chokwe": "cjk_Latn",
    "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn",
    "Danish": "dan_Latn",
    "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn",
    "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek",
    "English": "eng_Latn",
    "Esperanto": "epo_Latn",
    "Estonian": "est_Latn",
    "Basque": "eus_Latn",
    "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn",
    "Fijian": "fij_Latn",
    "Finnish": "fin_Latn",
    "Fon": "fon_Latn",
    "French": "fra_Latn",
    "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn",
    "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn",
    "Galician": "glg_Latn",
    "Guarani": "grn_Latn",
    "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn",
    "Hausa": "hau_Latn",
    "Hebrew": "heb_Hebr",
    "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva",
    "Croatian": "hrv_Latn",
    "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn",
    "Ilocano": "ilo_Latn",
    "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn",
    "Italian": "ita_Latn",
    "Javanese": "jav_Latn",
    "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn",
    "Kamba": "kam_Latn",
    "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva",
    "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab",
    "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn",
    "Kabuverdianu": "kea_Latn",
    "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn",
    "Kyrgyz": "kir_Cyrl",
    "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn",
    "Kikongo": "kon_Latn",
    "Korean": "kor_Hang",
    "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn",
    "Limburgish": "lim_Latn",
    "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn",
    "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn",
    "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn",
    "Luo": "luo_Latn",
    "Mizo": "lus_Latn",
    "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva",
    "Maithili": "mai_Deva",
    "Malayalam": "mal_Mlym",
    "Marathi": "mar_Deva",
    "Minangkabau Arabic": "min_Arab",
    "Minangkabau Latin": "min_Latn",
    "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn",
    "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl",
    "Mossi": "mos_Latn",
    "Maori": "mri_Latn",
    "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn",
    "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva",
    "Northern Sotho": "nso_Latn",
    "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn",
    "West Central Oromo": "gaz_Latn",
    "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn",
    "Eastern Panjabi": "pan_Guru",
    "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab",
    "Polish": "pol_Latn",
    "Portuguese": "por_Latn",
    "Dari": "prs_Arab",
    "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn",
    "Rundi": "run_Latn",
    "Russian": "rus_Cyrl",
    "Sango": "sag_Latn",
    "Sanskrit": "san_Deva",
    "Santali": "sat_Olck",
    "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh",
    "Slovak": "slk_Latn",
    "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn",
    "Shona": "sna_Latn",
    "Sindhi": "snd_Arab",
    "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn",
    "Tosk Albanian": "als_Latn",
    "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl",
    "Swati": "ssw_Latn",
    "Sundanese": "sun_Latn",
    "Swedish": "swe_Latn",
    "Swahili": "swh_Latn",
    "Silesian": "szl_Latn",
    "Tamil": "tam_Taml",
    "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu",
    "Tajik": "tgk_Cyrl",
    "Tagalog": "tgl_Latn",
    "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi",
    "Tamasheq Latin": "taq_Latn",
    "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn",
    "Tsonga": "tso_Latn",
    "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn",
    "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng",
    "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn",
    "Urdu": "urd_Arab",
    "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn",
    "Vietnamese": "vie_Latn",
    "Waray": "war_Latn",
    "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn",
    "Eastern Yiddish": "ydd_Hebr",
    "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans",
    "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn",
    "Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
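# Minimal usage sketch, assuming PipelineTool.__call__ chains encode -> forward
# -> decode as transformers' agent tools do (downloads the NLLB checkpoint on
# first use):
if __name__ == "__main__":
    translator = TranslationTool()
    print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))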
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        """Test that stable diffusion img2img works with fp16"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc of ``angle`` degrees on a circle of radius ``radius``."""
    return 2 * pi * radius * (angle / 360)
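# Worked example: the demo below computes a 90-degree arc of a radius-10 circle,
# i.e. a quarter of the circumference: 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.70796.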
if __name__ == "__main__":
print(arc_length(90, 10))
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline using any model with an image-classification head."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have fewer false positives than zipfile.is_zipfile,
    # which only checks the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
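# Minimal usage sketch: build a default GIT configuration and round-trip it
# through to_dict(); the printed values follow the defaults defined above:
if __name__ == "__main__":
    config = GitConfig()
    print(config.model_type, config.vision_config.model_type)  # git git_vision_model
    print(config.to_dict()["vision_config"]["hidden_size"])  # 768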
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : list ) -> list:
"""simple docstring"""
if len(lowerCamelCase__ ) <= 1:
return [tuple(lowerCamelCase__ )]
_lowerCAmelCase = []
def generate(snake_case_ : int , snake_case_ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowerCamelCase__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_lowerCAmelCase = arr[k - 1], arr[i]
else: # k is odd
_lowerCAmelCase = arr[k - 1], arr[0]
generate(k - 1 , lowerCamelCase__ )
generate(len(lowerCamelCase__ ) , lowerCamelCase__ )
return res
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = input('''Enter numbers separated by a comma:\n''').strip()
__SCREAMING_SNAKE_CASE : str = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 600851475143 ) -> int:
"""simple docstring"""
try:
_lowerCAmelCase = int(snake_case_ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
_lowerCAmelCase = 1
_lowerCAmelCase = 2
while i * i <= n:
while n % i == 0:
_lowerCAmelCase = i
n //= i
i += 1
if n > 1:
_lowerCAmelCase = n
return int(snake_case_ )
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
# save results
if os.path.exists(UpperCAmelCase ):
if os.path.exists(os.path.join(UpperCAmelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(UpperCAmelCase , 'config.json' ) ):
os.remove(os.path.join(UpperCAmelCase , 'config.json' ) )
if os.path.exists(os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(UpperCAmelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and importance scores, following Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below the masking threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune (actually remove) the heads selected by ``mask_heads`` and compare score and speed."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def UpperCAmelCase ( ) -> List[str]:
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=UpperCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=UpperCAmelCase , type=UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=UpperCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=UpperCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=UpperCAmelCase , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=UpperCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=UpperCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=UpperCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=UpperCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=UpperCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=UpperCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=UpperCAmelCase , default='' , help='Can be used for distant debugging.' )
snake_case_ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
snake_case_ = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
snake_case_ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
snake_case_ = torch.device('cuda' , args.local_rank )
snake_case_ = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
snake_case_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
snake_case_ = nn.parallel.DistributedDataParallel(
UpperCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCAmelCase )
elif args.n_gpu > 1:
snake_case_ = nn.DataParallel(UpperCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCAmelCase )
torch.save(UpperCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Prepare dataset
data = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
train_tensor_dataset = (torch.from_numpy(data ),)
train_data = TensorDataset(*train_tensor_dataset )
train_sampler = RandomSampler(train_data )
eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(args , model , eval_dataloader )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
head_mask = mask_heads(args , model , eval_dataloader )
prune_heads(args , model , eval_dataloader , head_mask )
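# A minimal sketch of the loop implied by mask_heads above (helper internals
# are assumed from context, not shown in this file): starting from an all-ones
# head mask, repeatedly zero the `masking_amount` fraction of heads with the
# lowest importance scores and re-evaluate, stopping once the metric falls
# below `masking_threshold * original_score`; prune_heads then physically
# removes the masked heads and measures the speed/accuracy effect.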
if __name__ == "__main__":
main()
| 69
|
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles( path , articles ):
content = """\n""".join(articles )
Path(path ).open("""w""" ).writelines(content )
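# _dump_articles writes the articles as newline-joined text so run_eval and
# run_eval_search can read one example per line.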
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _snake_case ( TestCasePlus ):
def run_eval_tester( self , model):
input_file_name = Path(self.get_auto_remove_tmp_dir()) / """utest_input.source"""
output_file_name = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
articles = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(input_file_name , articles)
score_path = str(Path(self.get_auto_remove_tmp_dir()) / """scores.json""")
task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
testargs = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(sys , """argv""" , testargs):
run_generate()
assert Path(output_file_name).exists()
# os.remove(Path(output_file_name))
def test_run_eval( self):
self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY])
@slow
def test_run_eval_slow( self , model):
self.run_eval_tester(model)
@parameterized.expand([T5_TINY, MBART_TINY])
@slow
def test_run_eval_search( self , model):
input_file_name = Path(self.get_auto_remove_tmp_dir()) / """utest_input.source"""
output_file_name = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
text = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
tmp_dir = Path(self.get_auto_remove_tmp_dir())
score_path = str(tmp_dir / """scores.json""")
reference_path = str(tmp_dir / """val.target""")
_dump_articles(input_file_name , text["""en"""])
_dump_articles(reference_path , text["""de"""])
task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
testargs = f'''
run_eval_search.py
{model}
{str(input_file_name)}
{str(output_file_name)}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""])
with patch.object(sys , """argv""" , testargs):
with CaptureStdout() as cs:
run_search()
expected_strings = [""" num_beams | length_penalty""", model, """Best score args"""]
un_expected_strings = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""")
else:
expected_strings.extend(ROUGE_KEYS)
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(output_file_name).exists()
os.remove(Path(output_file_name))
| 163
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput( BaseOutput ):
images : Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline( DiffusionPipeline ):
def __init__( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
super().__init__()
self.register_modules(
prior=snake_case ,image_encoder=snake_case ,image_processor=snake_case ,scheduler=snake_case ,renderer=snake_case ,)
def prepare_latents( self ,shape ,dtype ,device ,generator ,latents ,scheduler ):
'''simple docstring'''
if latents is None:
latents = randn_tensor(shape ,generator=generator ,device=device ,dtype=dtype )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
latents = latents.to(device )
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * scheduler.init_noise_sigma
return latents
def enable_sequential_cpu_offload( self ,gpu_id=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
device = torch.device(f"cuda:{gpu_id}" )
models = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model ,device )
@property
def _execution_device( self ):
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder ,"""_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(snake_case ,"""_hf_hook""" )
and hasattr(module._hf_hook ,"""execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
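# accelerate's offload hooks record each module's execution device in
# `_hf_hook.execution_device`, which is what the property above inspects
# when the pipeline weights live on the `meta` device.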
def _encode_image( self ,image ,device ,num_images_per_prompt ,do_classifier_free_guidance ,):
'''simple docstring'''
if isinstance(image ,list ) and isinstance(image[0] ,torch.Tensor ):
image = torch.cat(image ,axis=0 ) if image[0].ndim == 4 else torch.stack(image ,axis=0 )
if not isinstance(image ,torch.Tensor ):
image = self.image_processor(image ,return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
image = image.to(dtype=self.image_encoder.dtype ,device=device )
image_embeds = self.image_encoder(image )["""last_hidden_state"""]
image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt ,dim=0 )
if do_classifier_free_guidance:
negative_image_embeds = torch.zeros_like(image_embeds )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
image_embeds = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self ,snake_case ,snake_case = 1 ,snake_case = 25 ,snake_case = None ,snake_case = None ,snake_case = 4.0 ,snake_case = 64 ,snake_case = "pil" ,snake_case = True ,):
'''simple docstring'''
if isinstance(snake_case ,PIL.Image.Image ):
lowercase : List[str] = 1
elif isinstance(snake_case ,torch.Tensor ):
lowercase : Dict = image.shape[0]
elif isinstance(snake_case ,snake_case ) and isinstance(image[0] ,(torch.Tensor, PIL.Image.Image) ):
lowercase : List[Any] = len(snake_case )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(snake_case )}" )
lowercase : str = self._execution_device
lowercase : Optional[Any] = batch_size * num_images_per_prompt
lowercase : Optional[int] = guidance_scale > 1.0
lowercase : Union[str, Any] = self._encode_image(snake_case ,snake_case ,snake_case ,snake_case )
# prior
self.scheduler.set_timesteps(snake_case ,device=snake_case )
lowercase : List[str] = self.scheduler.timesteps
lowercase : List[str] = self.prior.config.num_embeddings
lowercase : str = self.prior.config.embedding_dim
lowercase : Any = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) ,image_embeds.dtype ,snake_case ,snake_case ,snake_case ,self.scheduler ,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase : List[str] = latents.reshape(latents.shape[0] ,snake_case ,snake_case )
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
lowercase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase : Union[str, Any] = self.scheduler.scale_model_input(snake_case ,snake_case )
lowercase : Union[str, Any] = self.prior(
snake_case ,timestep=snake_case ,proj_embedding=snake_case ,).predicted_image_embedding
# remove the variance
lowercase , lowercase : Tuple = noise_pred.split(
scaled_model_input.shape[2] ,dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
lowercase , lowercase : Any = noise_pred.chunk(2 )
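# classifier-free guidance: combine the unconditional and conditional
# predictions as uncond + guidance_scale * (cond - uncond)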
lowercase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase : Union[str, Any] = self.scheduler.step(
snake_case ,timestep=snake_case ,sample=snake_case ,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=snake_case )
lowercase : Any = []
for i, latent in enumerate(snake_case ):
lowercase : Union[str, Any] = self.renderer.decode(
latent[None, :] ,snake_case ,size=snake_case ,ray_batch_size=4096 ,n_coarse_samples=64 ,n_fine_samples=128 ,)
images.append(snake_case )
lowercase : Optional[int] = torch.stack(snake_case )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
lowercase : str = images.cpu().numpy()
if output_type == "pil":
lowercase : Optional[int] = [self.numpy_to_pil(snake_case ) for image in images]
# Offload last model to CPU
if hasattr(self ,"""final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=snake_case )
| 285
|
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
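# A quine: "%r" re-embeds the format string into itself (with "%%" collapsing
# back to "%"), so the program reproduces its own source, modulo the quote
# style that %r chooses.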
| 285
| 1
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
a_ = """scheduler_config.json"""
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 3
lowerCAmelCase__ = 4
lowerCAmelCase__ = 5
@dataclass
class __lowerCAmelCase ( lowerCAmelCase__ ):
prev_sample : jnp.ndarray
class __lowerCAmelCase :
config_name = SCHEDULER_CONFIG_NAME
ignore_for_config = ["""dtype"""]
_compatibles = []
has_compatibles = True
@classmethod
def from_pretrained( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ):
'''simple docstring'''
config , kwargs = cls.load_config(
pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
scheduler , unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
if hasattr(scheduler , '''create_state''' ) and getattr(scheduler , '''has_state''' , False ):
state = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def save_pretrained( self , save_directory , push_to_hub = False , **kwargs ):
'''simple docstring'''
self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
@property
def compatibles( self ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def _get_compatibles( cls ):
'''simple docstring'''
compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
compatible_classes = [
getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
]
return compatible_classes
def broadcast_to_shape_from_left( x : jnp.ndarray , shape : Tuple[int] ):
assert len(shape ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar( num_diffusion_timesteps : int , max_beta : float = 0.999 , dtype = jnp.float32 ):
def alpha_bar( time_step ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
betas = []
for i in range(num_diffusion_timesteps ):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
return jnp.array(betas , dtype=dtype )
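# The "squaredcos_cap_v2" (Glide cosine) schedule defines
# alpha_bar(t) = cos^2(((t + 0.008) / 1.008) * pi / 2) and derives each step
# as beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clamped at max_beta so
# the final steps stay numerically stable. A minimal usage sketch:
#   betas = betas_for_alpha_bar(1_000)
#   alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)  # as in CommonSchedulerState.create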
@flax.struct.dataclass
class __lowerCAmelCase :
alphas : jnp.ndarray
betas : jnp.ndarray
alphas_cumprod : jnp.ndarray
@classmethod
def create( cls , scheduler ):
'''simple docstring'''
config = scheduler.config
if config.trained_betas is not None:
betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
betas = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
alphas = 1.0 - betas
alphas_cumprod = jnp.cumprod(alphas , axis=0 )
return cls(
alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod( state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
alphas_cumprod = state.alphas_cumprod
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
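# add_noise_common below is the closed-form forward diffusion q(x_t | x_0):
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise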
def add_noise_common( state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def get_velocity_common( state : CommonSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
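# get_velocity_common computes the "v-prediction" target of Salimans & Ho
# (progressive distillation): v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * x_0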
| 330
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
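# _LazyModule defers importing the heavy submodules until an attribute is
# first accessed, keeping `import transformers` fast when optional backends
# such as torch are installed but unused.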
| 330
| 1
|
from collections.abc import Callable
import numpy as np
def heun_method( ode_func : Callable , y0 : float , x0 : float , step_size : float , x_end : float ):
'''simple docstring'''
n = int(np.ceil((x_end - x0) / step_size ) )
y = np.zeros((n + 1,) )
y[0] = y0
x = x0
for k in range(n ):
# Euler predictor step, then trapezoidal corrector (Heun's method)
y_pred = y[k] + step_size * ode_func(x , y[k] )
y[k + 1] = y[k] + (
(step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
)
x += step_size
return y
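# Illustrative check (parameter order as defined above): for y' = y with
# y(0) = 1 on [0, 1], heun_method(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
# approximates e ≈ 2.71828, with second-order accuracy in the step size.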
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc : Callable[[int | float], int | float] , x_start : int | float , x_end : int | float , steps : int = 100 , ) -> float:
'''simple docstring'''
x1 = x_start
fx1 = fnc(x_start )
area = 0.0
for _ in range(steps ):
# Approximates small segments of the curve as linear and solves
# for the trapezoidal area
x2 = (x_end - x_start) / steps + x1
fx2 = fnc(x2 )
area += abs(fx2 + fx1 ) * (x2 - x1 ) / 2
# Increment step
x1 = x2
fx1 = fx2
return area
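# Each segment contributes the trapezoid area (f(x1) + f(x2)) / 2 * (x2 - x1);
# summing over `steps` uniform segments converges to the integral with
# O(1/steps^2) error for smooth integrands, and the abs(...) keeps the
# reported area positive where the curve dips below the x axis.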
if __name__ == "__main__":
def f( x : float ) -> float:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
snake_case_ = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 216
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list( shape , scale=1.0 , rng=None , name=None ):
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class ASTFeatureExtractionTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , feature_size=1 , padding_value=0.0 , sampling_rate=16_000 , return_attention_mask=True , do_normalize=True , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.feature_size = feature_size
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
'''simple docstring'''
def _flatten( list_of_lists ):
return list(itertools.chain(*list_of_lists ) )
if equal_length:
speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
speech_inputs = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
feature_extraction_class = ASTFeatureExtractor
def setUp( self ):
'''simple docstring'''
self.feat_extract_tester = ASTFeatureExtractionTester(self )
def test_call( self ):
'''simple docstring'''
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test not batched input
encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
# Test batched
encoded_sequences_1 = feat_extract(speech_inputs , padding=True , return_tensors='np' ).input_values
encoded_sequences_2 = feat_extract(np_speech_inputs , padding=True , return_tensors='np' ).input_values
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs )
encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='np' ).input_values
encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='np' ).input_values
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
@require_torch
def test_double_precision_pad( self ):
'''simple docstring'''
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _load_datasamples( self , num_samples ):
'''simple docstring'''
from datasets import load_dataset
ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def test_integration( self ):
'''simple docstring'''
# fmt: off
EXPECTED_INPUT_VALUES = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
input_speech = self._load_datasamples(1 )
feature_extractor = ASTFeatureExtractor()
input_values = feature_extractor(input_speech , return_tensors='pt' ).input_values
self.assertEqual(input_values.shape , (1, 1_024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
| 212
|
class TrieNode:
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
self.nodes = {} # Mapping from char to TrieNode
self.is_leaf = False
def insert_many( self , words ):
"""simple docstring"""
for word in words:
self.insert(word )
def insert( self , word ):
"""simple docstring"""
curr = self
for char in word:
if char not in curr.nodes:
curr.nodes[char] = TrieNode()
curr = curr.nodes[char]
curr.is_leaf = True
def find( self , word ):
"""simple docstring"""
curr = self
for char in word:
if char not in curr.nodes:
return False
curr = curr.nodes[char]
return curr.is_leaf
def delete( self , word ):
"""simple docstring"""
def _delete( curr , word , index ) -> bool:
if index == len(word ):
# If word does not exist
if not curr.is_leaf:
return False
curr.is_leaf = False
return len(curr.nodes ) == 0
char = word[index]
char_node = curr.nodes.get(char )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
delete_curr = _delete(char_node , word , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , word , 0 )
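# Deletion walks to the end of the word, clears is_leaf, then prunes nodes
# bottom-up: a node is removed only when it has no children left, so shared
# prefixes of other words (e.g. 'bananas' after deleting 'banana') survive.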
def print_words( node , word ) -> None:
"""simple docstring"""
if node.is_leaf:
print(word , end=' ' )
for key, value in node.nodes.items():
print_words(value , word + key )
def test_trie( ) -> bool:
"""simple docstring"""
words = 'banana bananas bandana band apple all beast'.split()
root = TrieNode()
root.insert_many(words )
# print_words(root, "")
assert all(root.find(word ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def print_results( msg , passes ) -> None:
"""simple docstring"""
print(str(msg ) , 'works!' if passes else 'doesn\'t work :(' )
def pytests( ) -> None:
"""simple docstring"""
assert test_trie()
def main( ) -> None:
"""simple docstring"""
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
| 39
| 0
|
"""simple docstring"""
def bead_sort( sequence : list ) -> list:
'''simple docstring'''
if any(not isinstance(x , int ) or x < 0 for x in sequence ):
raise TypeError('Sequence must be list of non-negative integers' )
for _ in range(len(sequence ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
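# Bead sort ("gravity sort") models each value as beads stacked on a rod; the
# inner loop lets a bead fall from a taller rod onto a shorter neighbour, and
# len(sequence) passes guarantee every bead settles, yielding ascending order.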
| 95
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester( ConfigTester ):
def create_and_test_config_common_properties( self ):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config , 'hidden_sizes'))
self.parent.assertTrue(hasattr(config , 'num_attention_heads'))
self.parent.assertTrue(hasattr(config , 'num_encoder_blocks'))
class SegformerModelTester:
def __init__(self , parent , batch_size=13 , image_size=64 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[16, 32, 64, 1_28] , downsampling_rates=[1, 4, 8, 16] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.sr_ratios = sr_ratios
self.depths = depths
self.hidden_sizes = hidden_sizes
self.downsampling_rates = downsampling_rates
self.num_attention_heads = num_attention_heads
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def create_and_check_model(self , config , pixel_values , labels):
model = SegformerModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def create_and_check_for_image_segmentation(self , config , pixel_values , labels):
config.num_labels = self.num_labels
model = SegformerForSemanticSegmentation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
result = model(pixel_values , labels=labels)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def create_and_check_for_binary_image_segmentation(self , config , pixel_values , labels):
config.num_labels = 1
model = SegformerForSemanticSegmentation(config=config)
model.to(torch_device)
model.eval()
labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(torch_device)
result = model(pixel_values , labels=labels)
self.parent.assertGreater(result.loss , 0.0)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
def setUp(self):
self.model_tester = SegformerModelTester(self)
self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_binary_image_segmentation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)
def test_image_segmentation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip('SegFormer does not use inputs_embeds')
def test_inputs_embeds(self):
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
def test_model_common_attributes(self):
pass
def test_forward_signature(self):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names)
def test_attention_outputs(self):
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] = True
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = True
__snake_case : List[str] = False
__snake_case : Tuple = True
__snake_case : int = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : int = model(**self._prepare_for_class(_A , _A))
__snake_case : Union[str, Any] = outputs.attentions
__snake_case : int = sum(self.model_tester.depths)
self.assertEqual(len(_A) , _A)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : int = True
__snake_case : Union[str, Any] = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(_A , _A))
__snake_case : Optional[int] = outputs.attentions
self.assertEqual(len(_A) , _A)
# verify the first attentions (first block, first layer)
__snake_case : Optional[int] = (self.model_tester.image_size // 4) ** 2
__snake_case : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__snake_case : int = (self.model_tester.image_size // 32) ** 2
__snake_case : Any = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__snake_case : int = len(_A)
# Check attention is always last and order is fine
__snake_case : Any = True
__snake_case : Tuple = True
__snake_case : Optional[Any] = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : Any = model(**self._prepare_for_class(_A , _A))
self.assertEqual(out_len + 1 , len(_A))
__snake_case : List[Any] = outputs.attentions
self.assertEqual(len(_A) , _A)
# verify the first attentions (first block, first layer)
__snake_case : Any = (self.model_tester.image_size // 4) ** 2
__snake_case : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def test_hidden_states_output(self):
def check_hidden_states_output(_A : Union[str, Any] , _A : List[str] , _A : Tuple):
__snake_case : Tuple = model_class(_A)
model.to(_A)
model.eval()
with torch.no_grad():
__snake_case : Optional[int] = model(**self._prepare_for_class(_A , _A))
__snake_case : List[str] = outputs.hidden_states
__snake_case : Tuple = self.model_tester.num_encoder_blocks
self.assertEqual(len(_A) , _A)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(_A , _A , _A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Any = True
check_hidden_states_output(_A , _A , _A)
def test_training(self):
if not self.model_tester.is_training:
return
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
if model_class in get_values(MODEL_MAPPING):
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
loss = model(**inputs).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def _lowercase (self : Tuple) -> Dict:
pass
@slow
def test_model_from_pretrained(self):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = SegformerModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def prepare_img( ) -> Image.Image:
'''simple docstring'''
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class SegformerModelIntegrationTest( unittest.TestCase ):
@slow
def test_inference_image_segmentation_ade(self):
# only resize + normalize
image_processor = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=False , align=False , do_random_crop=False)
model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
torch_device)
image = prepare_img()
encoded_inputs = image_processor(images=image , return_tensors='pt')
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
expected_shape = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , expected_shape)
expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1E-4))
@slow
def test_inference_image_segmentation_city(self):
# only resize + normalize
image_processor = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=False , align=False , do_random_crop=False)
model = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(torch_device)
image = prepare_img()
encoded_inputs = image_processor(images=image , return_tensors='pt')
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
expected_shape = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , expected_shape)
expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1E-1))
@slow
def test_post_processing_semantic_segmentation(self):
# only resize + normalize
image_processor = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=False , align=False , do_random_crop=False)
model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
torch_device)
image = prepare_img()
encoded_inputs = image_processor(images=image , return_tensors='pt')
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(5_00, 3_00)])
expected_shape = torch.Size((5_00, 3_00))
self.assertEqual(segmentation[0].shape , expected_shape)
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
expected_shape = torch.Size((1_28, 1_28))
self.assertEqual(segmentation[0].shape , expected_shape)
| 95
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105
|
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a : np.ndarray , input_b : np.ndarray ) -> float:
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search( dataset : np.ndarray , value_array : np.ndarray ) -> list[list[list[float] | float]]:
'''simple docstring'''
if dataset.ndim != value_array.ndim:
msg = (
"Wrong input data's dimensions... "
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(msg )
try:
if dataset.shape[1] != value_array.shape[1]:
msg = (
"Wrong input data's shape... "
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(msg )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
msg = (
"Input data have different datatype... "
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(msg )
answer = []
for value in value_array:
dist = euclidean(value , dataset[0] )
vector = dataset[0].tolist()
for dataset_value in dataset[1:]:
temp_dist = euclidean(value , dataset_value )
if dist > temp_dist:
dist = temp_dist
vector = dataset_value.tolist()
answer.append([vector, dist] )
return answer
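# This is a brute-force nearest-neighbour search: every query vector scans the
# whole dataset, so the cost is O(len(value_array) * len(dataset) * dim).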
def cosine_similarity( input_a : np.ndarray , input_b : np.ndarray ) -> float:
'''simple docstring'''
return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
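# Cosine similarity is the dot product normalised by the Euclidean norms:
# cos(theta) = (a . b) / (||a|| * ||b||), ranging from -1 to 1.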
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester( unittest.TestCase ):
def __init__( self ,parent ,batch_size=1_3 ,seq_length=7 ,is_training=True ,use_attention_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=9_9 ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_1_2 ,type_vocab_size=1_6 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_choices=4 ,):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
config = BertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def prepare_config_and_inputs_for_decoder( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
test_head_masking = True
all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
)
if is_flax_available()
else ()
)
def setUp( self ):
self.model_tester = FlaxBertModelTester(self )
@slow
def test_model_from_pretrained( self ):
model = FlaxBertModel.from_pretrained("""bert-base-cased""" )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
| 353
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A : Tuple = logging.get_logger(__name__)
class EncodecFeatureExtractor( SequenceFeatureExtractor ):
model_input_names = ['input_values', 'padding_mask']
def __init__( self ,feature_size : int = 1 ,sampling_rate : int = 2_4_0_0_0 ,padding_value : float = 0.0 ,chunk_length_s : float = None ,overlap : float = None ,**kwargs ,):
super().__init__(feature_size=feature_size ,sampling_rate=sampling_rate ,padding_value=padding_value ,**kwargs )
self.chunk_length_s = chunk_length_s
self.overlap = overlap
@property
def chunk_length( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def chunk_stride( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
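# With overlap o in [0, 1) and chunk length L, consecutive chunks start every
# max(1, int((1 - o) * L)) samples, so o = 0 yields non-overlapping chunks.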
def __call__( self ,raw_audio : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,padding : Optional[Union[bool, str, PaddingStrategy]] = None ,truncation : Optional[bool] = False ,max_length : Optional[int] = None ,return_tensors : Optional[Union[str, TensorType]] = None ,sampling_rate : Optional[int] = None ,):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
padding = True
is_batched = bool(
isinstance(raw_audio ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
raw_audio = [np.asarray(audio ,dtype=np.float32 ).T for audio in raw_audio]
elif not is_batched and not isinstance(raw_audio ,np.ndarray ):
raw_audio = np.asarray(raw_audio ,dtype=np.float32 )
elif isinstance(raw_audio ,np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
raw_audio = raw_audio.astype(np.float32 )
# always return batch
if not is_batched:
raw_audio = [np.asarray(raw_audio ).T]
# verify inputs are valid
for idx, example in enumerate(raw_audio ):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
snake_case_ : Tuple = None
snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio )
snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) )
snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
snake_case_ : Any = max(array.shape[0] for array in raw_audio )
snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) )
snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length
snake_case_ : Union[str, Any] = """max_length"""
else:
snake_case_ : int = input_values
# normal padding on batch
if padded_inputs is None:
snake_case_ : Optional[int] = self.pad(
_UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,)
if padding:
snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" )
snake_case_ : Optional[int] = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
snake_case_ : Dict = example[..., None]
input_values.append(example.T )
snake_case_ : List[Any] = input_values
if return_tensors is not None:
snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase )
return padded_inputs
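# --- Hypothetical usage sketch (the checkpoint name is an assumption, not from this file) ---
#   import numpy as np
#   from transformers import AutoFeatureExtractor
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_24khz")
#   audio = np.zeros(24_000, dtype=np.float32)  # one second of mono audio at 24 kHz
#   features = extractor(audio, sampling_rate=24_000, return_tensors="pt")
#   # features["input_values"] has shape (batch, channels, padded_length)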
| 8
| 0
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
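# --- Invocation note (the test file path is an assumption) ---
# The @slow decorator gates these BLEU checks behind the RUN_SLOW environment
# variable, so they only run when explicitly requested, e.g.:
#   RUN_SLOW=1 pytest -k bleu tests/fsmt/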
| 119
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` stays in the asdict output even when it matches the default, so the template round-trips through JSON
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
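# --- Hypothetical usage sketch (assumption, not part of the original file) ---
#   from datasets import load_dataset
#
#   template = QuestionAnsweringExtractive()
#   squad = load_dataset("squad", split="train")
#   # prepare_for_task casts/renames columns to match the template's input/label schema
#   squad = squad.prepare_for_task(template)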
| 317
| 0
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set `slice_size` to None to compute attention in a single step
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        text_embeddings=None,
        **kwargs,
    ):
        """Run text-to-image generation with a 64x64 reference latent pasted into the target latent."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.')

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f' {self.tokenizer.model_max_length} tokens: {removed_text}')
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='
                    f' {type(prompt)}.')
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'
                    f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype)
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
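# --- Hypothetical usage sketch (the checkpoint name is an assumption) ---
#   import torch
#
#   pipe = SeedResizeStableDiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   # thanks to the pasted 64x64 reference latent, the same seed should keep a
#   # similar composition when only the aspect ratio changes
#   image = pipe("a photo of an astronaut", height=512, width=768,
#                generator=generator).images[0]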
| 32
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f'image.shape {image.shape}')

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy")

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="").to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 32
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to feed the processor with."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
| 140
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
def str2bool(v):
    """Parse an argparse string flag into a boolean."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected')
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Map one original resnet block onto the diffusers naming scheme."""
    new_checkpoint[f'{new_prefix}.norm1.weight'] = checkpoint[f'{old_prefix}.in_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm1.bias'] = checkpoint[f'{old_prefix}.in_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv1.weight'] = checkpoint[f'{old_prefix}.in_layers.2.weight']
    new_checkpoint[f'{new_prefix}.conv1.bias'] = checkpoint[f'{old_prefix}.in_layers.2.bias']
    new_checkpoint[f'{new_prefix}.time_emb_proj.weight'] = checkpoint[f'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[f'{new_prefix}.time_emb_proj.bias'] = checkpoint[f'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[f'{new_prefix}.norm2.weight'] = checkpoint[f'{old_prefix}.out_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm2.bias'] = checkpoint[f'{old_prefix}.out_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv2.weight'] = checkpoint[f'{old_prefix}.out_layers.3.weight']
    new_checkpoint[f'{new_prefix}.conv2.bias'] = checkpoint[f'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[f'{new_prefix}.conv_shortcut.weight'] = checkpoint[f'{old_prefix}.skip_connection.weight']
        new_checkpoint[f'{new_prefix}.conv_shortcut.bias'] = checkpoint[f'{old_prefix}.skip_connection.bias']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Split the fused qkv projection of one attention block into separate q/k/v weights."""
    weight_q, weight_k, weight_v = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3, dim=0)

    new_checkpoint[f'{new_prefix}.group_norm.weight'] = checkpoint[f'{old_prefix}.norm.weight']
    new_checkpoint[f'{new_prefix}.group_norm.bias'] = checkpoint[f'{old_prefix}.norm.bias']

    new_checkpoint[f'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f'{new_prefix}.to_out.0.weight'] = (
        checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f'{new_prefix}.to_out.0.bias'] = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Convert an original consistency-model UNet checkpoint into the diffusers state dict layout."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f'down_blocks.{i}.resnets.{j}'
                old_prefix = f'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f'down_blocks.{i}.attentions.{j}'
                old_prefix = f'input_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f'down_blocks.{i}.downsamplers.0'
            old_prefix = f'input_blocks.{current_layer}.0'
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config['up_block_types']

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.1'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f'up_blocks.{i}.resnets.{j}'
                old_prefix = f'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f'up_blocks.{i}.attentions.{j}'
                old_prefix = f'output_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f'up_blocks.{i}.upsamplers.0'
                old_prefix = f'output_blocks.{current_layer-1}.2'
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = strabool(args.class_cond)
_UpperCAmelCase = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
_UpperCAmelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_UpperCAmelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_UpperCAmelCase = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
_UpperCAmelCase = None
_UpperCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config)
_UpperCAmelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_UpperCAmelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_UpperCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_UpperCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
_UpperCAmelCase = CMStochasticIterativeScheduler(**scheduler_config)
_UpperCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
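# --- Hypothetical invocation sketch (checkpoint file name is an assumption) ---
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True
# The "cd"/"ct" and "imagenet64"/"256" substrings in the checkpoint name drive the
# config selection logic above.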
| 140
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build the list of (old_name, new_name) pairs used to rename checkpoint keys."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT weights into the HuggingFace ViLT structure."""
    # define configuration and initialize the right HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :2], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model and processor to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
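# --- Hypothetical invocation sketch (output path is an assumption) ---
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm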
| 41
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
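# --- Illustrative note (assumption, not part of the original file) ---
# With the _LazyModule pattern above, submodules are only imported on first
# attribute access, e.g.:
#   from transformers.models.beit import BeitConfig   # cheap: config module only
#   from transformers.models.beit import BeitModel    # triggers the torch import path
# Static type checkers still see the real symbols because of the TYPE_CHECKING branch.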
| 41
| 1
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise the PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
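# --- Hypothetical invocation sketch (file paths are assumptions) ---
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin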
| 169
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """
        Forwards `images` to TvltImageProcessor and `audio` to TvltFeatureExtractor,
        then merges the resulting dictionaries.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
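# --- Hypothetical usage sketch (checkpoint name and shapes are assumptions) ---
#   import numpy as np
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   video = list(np.random.rand(8, 3, 224, 224))   # 8 RGB frames
#   audio = list(np.random.rand(10_000))           # raw waveform
#   inputs = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")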
| 169
| 1
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val):
    """Return a primitive root modulo the prime p_val (probabilistic search)."""
    print("""Generating primitive root of p""")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size):
    print("""Generating prime p...""")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name, key_size):
    if os.path.exists(f"""{name}_pubkey.txt""") or os.path.exists(f"""{name}_privkey.txt"""):
        print("""\nWARNING:""")
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            """Use a different name or delete these files and re-run this program.""")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(f"""{name}_pubkey.txt""", """w""") as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")
    print(f"""Writing private key to file {name}_privkey.txt...""")
    with open(f"""{name}_privkey.txt""", """w""") as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""")


def main():
    print("""Making key files...""")
    make_key_files("""elgamal""", 2048)
    print("""Key files generation successful""")
if __name__ == "__main__":
main()
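# --- Hypothetical usage sketch (assumption, not part of the original file) ---
#   public_key, private_key = generate_key(1024)  # smaller size for a quicker demo
#   key_size, e_1, e_2, p = public_key
#   _, d = private_key
#   # e_2 was built as the modular inverse of e_1**d mod p, so:
#   assert (e_2 * pow(e_1, d, p)) % p == 1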
| 371
|
from datetime import datetime as dt
import os
from github import Github
UpperCamelCase__ = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""])
    repo = g.get_repo("""huggingface/transformers""")
    open_issues = repo.get_issues(state="""open""")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="""closed""")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                """This issue has been automatically marked as stale because it has not had """
                """recent activity. If you think this still needs to be addressed """
                """please comment on this thread.\n\nPlease note that issues that do not follow the """
                """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
                """are likely to be ignored.""")
if __name__ == "__main__":
main()
| 87
| 0
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
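# Hedged usage sketch (assumes a 16 kHz waveform as a 1-D float array; the
# checkpoint name is a real public one):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, text="HELLO WORLD")
# The result carries "input_values" from the feature extractor plus "labels"
# from the tokenizer, ready for CTC fine-tuning.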
| 285
|
def dodecahedron_surface_area(edge: int) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive integer.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: int) -> float:
    """Volume of a regular dodecahedron: ((15 + 7*sqrt(5)) / 4) * edge^3."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive integer.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
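# Worked example using the two formulas above: for edge = 5,
# dodecahedron_surface_area(5) ~= 516.14 and dodecahedron_volume(5) ~= 957.89.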
if __name__ == "__main__":
import doctest
doctest.testmod()
| 285
| 1
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
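# Illustrative sketch (hypothetical tiny vocab): with a vocab.json containing
# {"[GO]": 0, "a": 1, "b": 2}, tokenizer._tokenize("ab") yields ["a", "b"],
# and characters missing from the vocab fall back to the "[GO]" id via
# _convert_token_to_id.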
| 268
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 268
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None,
        stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None,
        return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
        return_length=False, verbose=True, return_tensors=None, **kwargs,
    ):
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
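# Hedged usage sketch (the checkpoint is a real public ViLT one):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")
# The encoding combines tokenizer fields (input_ids, attention_mask, ...) with
# pixel_values and pixel_mask from the image processor.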
| 177
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
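# Note on the helper above: it backfills whatever the caller omits — attention
# masks derived from pad-token positions and all-ones head masks — so the tests
# below can pass just (config, input_ids, decoder_input_ids).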
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 216
| 0
|
'''simple docstring'''
def get_1s_count(number: int) -> int:
    """Count the set bits in a non-negative integer via its binary representation."""
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
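# Example: get_1s_count(25) == 3, since bin(25) == "0b11001".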
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 299
| 1
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowercase_ :
pass
| 122
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 122
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' ,'False' ) ) is not True ,reason='Skipping test because should only be run when releasing minor transformers version' ,)
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 232
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320,
        num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0,
        bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
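# Quick check of the property above: with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio is 5 * 2**6 = 320, i.e. one
# logit frame per 320 input samples (20 ms of audio at 16 kHz).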
| 232
| 1
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths can form a polygon, i.e. the longest
    side is strictly shorter than the sum of the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
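# Example: check_polygon([6, 10, 5]) is True (10 < 6 + 5), while
# check_polygon([3, 7, 13, 2]) is False (13 >= 3 + 7 + 2).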
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True,
        strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 8
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
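# Hedged usage sketch (the path below is only illustrative):
#   write_basic_config(mixed_precision="bf16", save_location="/tmp/accel_default.json")
# On a single-GPU machine this records distributed_type="NO" and
# num_processes=1; the function returns the path, or False if a config
# already existed at that location.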
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 356
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 171
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root -> left subtree -> right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree -> right subtree -> root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree -> root -> right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of levels in the tree."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the nodes of one level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the nodes of one level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Zigzag (spiral) traversal: alternate direction on each level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
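# Worked example: for the sample tree built by `make_tree` (1 at the root, 2 and
# 3 as its children, 4 and 5 under 2), the traversals above produce:
#
#   preorder(root)    -> [1, 2, 4, 5, 3]
#   inorder(root)     -> [4, 2, 5, 1, 3]
#   postorder(root)   -> [4, 5, 2, 3, 1]
#   level_order(root) -> [1, 2, 3, 4, 5]
#   zigzag(root)      -> [[1], [3, 2], [4, 5]]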
| 32
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
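# A minimal usage sketch (an assumption, not shown in this file: the generic
# `PipelineTool.__call__` chains encode -> forward -> decode after `setup()`
# has loaded the checkpoint and tokenizer):
#
#   translator = TranslationTool()
#   translator.setup()
#   print(translator("Hello, how are you?", src_lang="English", tgt_lang="French"))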
| 32
| 1
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    # Subtract the per-row max before exponentiating, for numerical stability.
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
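# Quick sanity check (a sketch): subtracting the row-wise max leaves the result
# unchanged, since exp(x - m) / sum(exp(x - m)) == exp(x) / sum(exp(x)), but it
# prevents overflow for large logits:
#
#   >>> softmax(np.array([[1000.0, 1000.0]]))
#   array([[0.5, 0.5]])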
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
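# A minimal usage sketch (the checkpoint name is illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#   classifier("I love this!")              # -> [{"label": "POSITIVE", "score": 0.99...}]
#   classifier("I love this!", top_k=None)  # -> one dict per label, sorted by score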
| 293
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MBartaaTokenizer
lowerCAmelCase__ = MBartaaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Dict = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
__snake_case: int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case: Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case: List[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case: int = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case: Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Tuple = tokenizer_r.save_pretrained(A )
__snake_case: Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: Tuple = tokenizer_r.from_pretrained(A )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
__snake_case: Tuple = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: List[str] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: List[Any] = tokenizer_r.from_pretrained(A )
__snake_case: Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: Dict = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: Any = tokenizer_r.from_pretrained(A )
__snake_case: Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """facebook/mbart-large-50-one-to-many-mmt"""
lowerCAmelCase__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCAmelCase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250_038 )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertIn(A , self.tokenizer.all_special_ids )
__snake_case: Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__snake_case: str = self.tokenizer.decode(A , skip_special_tokens=A )
__snake_case: Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
__snake_case: Union[str, Any] = 10
__snake_case: List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[0] , A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(A ) , A )
def UpperCAmelCase__ ( self : Tuple ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_053, 250_001] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = tempfile.mkdtemp()
__snake_case: Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
__snake_case: Union[str, Any] = MBartaaTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
__snake_case: List[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__snake_case: Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__snake_case: List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
__snake_case: Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
__snake_case: Dict = targets["""input_ids"""]
__snake_case: Any = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
| 293
| 1
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluates a polynomial (coefficients ordered from lowest to highest degree) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluates the same polynomial with Horner's method: one multiplication per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
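# Worked example: poly = (0.0, 0.0, 5.0, 9.3, 7.0) encodes 7x^4 + 9.3x^3 + 5x^2,
# so both functions return 7*10_000 + 9.3*1_000 + 5*100 = 79_800.0 at x = 10.
# Horner's method computes it as ((((7)*x + 9.3)*x + 5)*x + 0)*x + 0, avoiding
# the repeated power computations of the direct expansion.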
| 41
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[str] =logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_A : Tuple =parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
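# A minimal invocation sketch (the script filename is illustrative; the URL is
# the default defined above):
#
#   python convert_vilt_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-mlm-itm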
| 41
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
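# A minimal usage sketch (relies only on the defaults defined above):
#
#   config = BeitConfig()                 # BEiT-base: 12 layers, hidden size 768
#   config = BeitConfig(image_size=384)   # override the input resolution
#   # number of patch tokens plus [CLS]: (384 // 16) ** 2 + 1 == 577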
| 352
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string must be int or convertible to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
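# Worked example: for s = "banana" the sorted rotations are
# ["abanan", "anaban", "ananab", "banana", "nabana", "nanaba"], so
# bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3},
# and reverse_bwt("nnbaaa", 3) rebuilds "banana" by repeatedly prepending the
# BWT column to the table and re-sorting.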
| 224
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: fold the frame axis into the sequence dimension so attention runs over time.
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: undo the reshapes and add the residual.
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual
        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
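# A shape sketch (values are illustrative): with num_frames=8 and an input of
# shape (batch * frames, channels, height, width) = (16, 320, 32, 32), the
# reshapes above give batch_size = 2 and hand the transformer blocks a
# (2 * 32 * 32, 8, 320) = (2048, 8, 320) tensor, so attention mixes the 8
# frames independently at every spatial position before the residual add.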
| 264
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
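# Timeline encoded above: an issue at least 30 days old and untouched for 23
# days gets the "stale" label plus a notification; if the bot's comment then
# stays the latest activity for another 7 days, the issue is closed. Any
# non-bot comment reopens the issue and removes the label.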
| 264
| 1
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=2 ,) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : Union[str, Any] = batch_size
UpperCAmelCase_ : Dict = image_size
UpperCAmelCase_ : Any = patch_size
UpperCAmelCase_ : Optional[Any] = num_channels
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : List[str] = use_labels
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Any = scope
UpperCAmelCase_ : List[str] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ : Tuple = (image_size // patch_size) ** 2
UpperCAmelCase_ : Union[str, Any] = num_patches + 1
def a__ ( self ) -> int:
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Tuple:
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCAmelCase_ : Dict = ViTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = ViTForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : str = ViTForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : int = self.type_sequence_label_size
UpperCAmelCase_ : str = ViTForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : int = ViTForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Dict = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> List[str]:
config_and_inputs = self.prepare_config_and_inputs()
# unpack into the names used below
config, pixel_values, labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : List[str] = ViTModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,has_text_modality=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def a__ ( self ) -> Optional[Any]:
pass
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : str = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE ,nn.Linear ) )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Any = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : str = [*signature.parameters.keys()]
UpperCAmelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> Dict:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : List[str] = ViTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a__ ( self ) -> Tuple:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : Tuple = prepare_img()
UpperCAmelCase_ : Tuple = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase_ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
@slow
def a__ ( self ) -> Dict:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
UpperCAmelCase_ : Optional[int] = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' ,size=480 )
UpperCAmelCase_ : Tuple = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
UpperCAmelCase_ : List[str] = inputs.pixel_values.to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(_SCREAMING_SNAKE_CASE ,interpolate_pos_encoding=_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Optional[int] = ViTModel.from_pretrained('''facebook/dino-vits8''' ,torch_dtype=torch.floataa ,device_map='''auto''' )
UpperCAmelCase_ : Any = self.default_image_processor
UpperCAmelCase_ : Any = prepare_img()
UpperCAmelCase_ : Any = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
UpperCAmelCase_ : str = inputs.pixel_values.to(_SCREAMING_SNAKE_CASE )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ : int = model(_SCREAMING_SNAKE_CASE )
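# Outside the test harness, the integration checks above boil down to this
# plain inference recipe; the checkpoint name matches the one used in the tests.
from PIL import Image
import requests
import torch
from transformers import ViTForImageClassification, ViTImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[logits.argmax(-1).item()])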
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=19 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=None ,) -> Dict:
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : Any = use_input_mask
UpperCAmelCase_ : Tuple = use_token_type_ids
UpperCAmelCase_ : Optional[int] = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : List[Any] = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Any = num_labels
UpperCAmelCase_ : Optional[Any] = num_choices
UpperCAmelCase_ : List[str] = scope
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : str = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = EsmConfig(
vocab_size=33 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=_SCREAMING_SNAKE_CASE ,esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} ,)
return config
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Optional[int] = EsmForProteinFolding(config=_SCREAMING_SNAKE_CASE ).float()
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) )
def a__ ( self ) -> Optional[Any]:
config_and_inputs = self.prepare_config_and_inputs()
# unpack into the names used below
config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = False
lowerCAmelCase = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCAmelCase = ()
lowerCAmelCase = {} if is_torch_available() else {}
lowerCAmelCase = False
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[int] = EsmFoldModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip('''Does not support attention outputs''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip
def a__ ( self ) -> Dict:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def a__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def a__ ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def a__ ( self ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def a__ ( self ) -> Dict:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def a__ ( self ) -> str:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def a__ ( self ) -> int:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def a__ ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def a__ ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def a__ ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def a__ ( self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__ ( self ) -> List[Any]:
pass
@require_torch
class __a( _a ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : List[str] = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
UpperCAmelCase_ : str = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE )['''positions''']
UpperCAmelCase_ : List[str] = torch.tensor([2.58_28, 0.79_93, -10.93_34] ,dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
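# A minimal sketch of the folding inference the slow test above exercises;
# resolving the tokenizer through AutoTokenizer is an assumption, and the toy
# sequence is a placeholder rather than a biologically meaningful protein.
import torch
from transformers import AutoTokenizer, EsmForProteinFolding

tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
model.eval()
inputs = tokenizer(["MKTAYIAKQR"], return_tensors="pt", add_special_tokens=False)
with torch.no_grad():
    positions = model(**inputs)["positions"]  # (8, batch, seq_len, 14, 3)
print(positions.shape)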
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class UpperCamelCase_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = BarthezTokenizer
lowerCAmelCase = BarthezTokenizerFast
lowerCAmelCase = True
lowerCAmelCase = True
def _UpperCamelCase ( self ) -> Union[str, Any]:
super().setUp()
tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
self.tokenizer = tokenizer
def _UpperCamelCase ( self ) -> Tuple:
snake_case_ = '<pad>'
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _UpperCamelCase ( self ) -> str:
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(a ) , 10_11_22 )
def _UpperCamelCase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def _UpperCamelCase ( self ) -> Any:
snake_case_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case_ = [0, 57, 30_18, 7_03_07, 91, 2]
snake_case_ = self.tokenizer(
a , max_length=len(a ) , padding=a , truncation=a , return_tensors='pt' )
self.assertIsInstance(a , a )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
snake_case_ = batch.input_ids.tolist()[0]
self.assertListEqual(a , a )
def _UpperCamelCase ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.tokenize(a )
snake_case_ = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
snake_case_ = tokenizer.encode(a , add_special_tokens=a )
snake_case_ = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(a )
snake_case_ = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
@slow
def _UpperCamelCase ( self ) -> Optional[int]:
# fmt: off
snake_case_ = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
snake_case_ = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=a , )
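# A short usage sketch for the tokenizer under test, assuming network access
# to the `moussaKam/mbarthez` checkpoint exercised above.
from transformers import BarthezTokenizerFast

tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
batch = tokenizer(
    ["A long paragraph for summarization.", "Another paragraph for summarization."],
    padding=True,
    return_tensors="pt",
)
print(batch.input_ids.shape, batch.attention_mask.shape)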
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = ['''image_processor''', '''tokenizer''']
lowerCAmelCase = '''BlipImageProcessor'''
lowerCAmelCase = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , image_processor , tokenizer ) -> Tuple:
tokenizer.return_token_type_ids = False
super().__init__(image_processor , tokenizer )
self.current_processor = self.image_processor
def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
self.current_processor = self.tokenizer
text_encoding = self.tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
return text_encoding
# add pixel_values
encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
if text is not None:
text_encoding = self.tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
else:
text_encoding = None
if text_encoding is not None:
encoding_image_processor.update(text_encoding )
return encoding_image_processor
def _UpperCamelCase ( self , *args , **kwargs ) -> int:
return self.tokenizer.batch_decode(*args , **kwargs )
def _UpperCamelCase ( self , *args , **kwargs ) -> Any:
return self.tokenizer.decode(*args , **kwargs )
@property
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.tokenizer.model_input_names
snake_case_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
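# A minimal sketch of calling a processor with the interface above; the
# `Salesforce/blip-image-captioning-base` checkpoint is an assumption used
# purely for illustration.
from PIL import Image
import requests
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # pixel_values plus the usual text encodings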
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class __snake_case ( _a ):
'''simple docstring'''
lowerCAmelCase__ = ["""audio_values""", """audio_mask"""]
def __init__( self : Union[str, Any] , A : Any=2_048 , A : Any=1 , A : Optional[int]=[16, 16] , A : str=128 , A : int=44_100 , A : Any=86 , A : str=2_048 , A : Optional[Any]=0.0 , **A : Dict , ):
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , **A , )
__snake_case: str = spectrogram_length
__snake_case: Optional[int] = num_channels
__snake_case: Optional[Any] = patch_size
__snake_case: Any = feature_size // self.patch_size[1]
__snake_case: Optional[int] = n_fft
__snake_case: Optional[int] = sampling_rate // hop_length_to_sampling_rate
__snake_case: Optional[Any] = sampling_rate
__snake_case: Optional[int] = padding_value
__snake_case: Tuple = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=A , norm="""slaney""" , mel_scale="""slaney""" , ).T
def UpperCAmelCase__ ( self : List[str] , A : Tuple ):
__snake_case: Tuple = spectrogram(
A , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
__snake_case: List[Any] = log_spec[:, :-1]
__snake_case: Union[str, Any] = log_spec - 20.0
__snake_case: Any = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : int , A : Tuple , A : Tuple = None , A : Dict = True , A : int = None , A : int = False , A : List[Any] = False , **A : List[str] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__snake_case: int = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__snake_case: Tuple = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__snake_case: Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
__snake_case: Optional[int] = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__snake_case: Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__snake_case: Any = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__snake_case: Optional[Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A ):
__snake_case: Tuple = [np.asarray(A , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__snake_case: Dict = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__snake_case: str = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__snake_case: Any = np.array(A ).astype(np.floataa )
# convert into correct format for padding
__snake_case: Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__snake_case: List[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__snake_case: Tuple = padded_audio_features * self.padding_value
for i in range(len(A ) ):
__snake_case: Any = audio_features[i]
__snake_case: Any = feature
# return as BatchFeature
if return_attention_mask:
__snake_case: Union[str, Any] = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__snake_case: Dict = {'audio_values': padded_audio_features}
__snake_case: Tuple = BatchFeature(data=A , tensor_type=A )
return encoded_inputs
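# A sketch of feeding raw mono audio to a feature extractor with the __call__
# above; `TvltFeatureExtractor` is an assumption about the concrete class, and
# the random waveform stands in for one second of real 44.1 kHz audio.
import numpy as np
from transformers import TvltFeatureExtractor

extractor = TvltFeatureExtractor()
waveform = np.random.randn(44_100).astype(np.float32)
features = extractor(waveform, sampling_rate=44_100, return_tensors="np")
print(features["audio_values"].shape, features["audio_mask"].shape)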
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
# cross-attention blocks additionally need a cross_attention_dim in their init kwargs
init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
init_dict = {
"""in_channels""": 32,
"""out_channels""": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
init_dict = {
"""in_channels""": 32,
"""out_channels""": 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
init_dict = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
init_dict["""cross_attention_dim"""] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
init_dict = {"""in_channels""": 32, """out_channels""": 32}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
init_dict = {"""in_channels""": 32, """out_channels""": 32}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
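# What each test above does, outside the harness: build a block, push a dummy
# sample through it, and inspect the output. `DownBlock2D` is the readable name
# of the first block tested; the import path follows older diffusers releases.
import torch
from diffusers.models.unet_2d_blocks import DownBlock2D

block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
sample = torch.randn(4, 32, 32, 32)   # (batch, channels, height, width)
temb = torch.randn(4, 128)            # timestep embedding
hidden_states, _ = block(sample, temb)
print(hidden_states.shape)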
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if openai_config_file == "":
_SCREAMING_SNAKE_CASE =OpenAIGPTConfig()
else:
_SCREAMING_SNAKE_CASE =OpenAIGPTConfig.from_json_file(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =OpenAIGPTModel(_UpperCamelCase )
# Load weights from numpy
load_tf_weights_in_openai_gpt(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
_SCREAMING_SNAKE_CASE =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
_SCREAMING_SNAKE_CASE =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , _UpperCamelCase )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
lowerCamelCase : str = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
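# After the conversion above, the dumped weights and config can be reloaded as
# below; the folder name is a placeholder for whatever --pytorch_dump_folder_path
# was given on the command line.
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel

config = OpenAIGPTConfig.from_json_file("pytorch_dump/config.json")
model = OpenAIGPTModel(config)
model.load_state_dict(torch.load("pytorch_dump/pytorch_model.bin"))
model.eval()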
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =botoa.client('iam' )
_SCREAMING_SNAKE_CASE ={
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=_UpperCamelCase , AssumeRolePolicyDocument=json.dumps(_UpperCamelCase , indent=2 ) )
_SCREAMING_SNAKE_CASE ={
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=_UpperCamelCase , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(_UpperCamelCase , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =botoa.client('iam' )
return iam_client.get_role(RoleName=_UpperCamelCase )["Role"]["Arn"]
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =_ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , _UpperCamelCase , )
_SCREAMING_SNAKE_CASE =None
if credentials_configuration == 0:
_SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Profile name: [default] ' , default='default' )
_SCREAMING_SNAKE_CASE =aws_profile
else:
print(
'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with '
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
_SCREAMING_SNAKE_CASE =_ask_field('AWS Access Key ID: ' )
_SCREAMING_SNAKE_CASE =aws_access_key_id
_SCREAMING_SNAKE_CASE =_ask_field('AWS Secret Access Key: ' )
_SCREAMING_SNAKE_CASE =aws_secret_access_key
_SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
_SCREAMING_SNAKE_CASE =aws_region
_SCREAMING_SNAKE_CASE =_ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , _UpperCamelCase , )
if role_management == 0:
_SCREAMING_SNAKE_CASE =_ask_field('Enter your IAM role name: ' )
else:
_SCREAMING_SNAKE_CASE ='accelerate_sagemaker_execution_role'
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =None
if is_custom_docker_image:
_SCREAMING_SNAKE_CASE =_ask_field('Enter your Docker image: ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =None
if is_sagemaker_inputs_enabled:
_SCREAMING_SNAKE_CASE =_ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =None
if is_sagemaker_metrics_enabled:
_SCREAMING_SNAKE_CASE =_ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , )
_SCREAMING_SNAKE_CASE =_ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you wish to optimize your script with torch dynamo? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
if use_dynamo:
_SCREAMING_SNAKE_CASE ='dynamo_'
_SCREAMING_SNAKE_CASE =_ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
if use_custom_options:
_SCREAMING_SNAKE_CASE =_ask_options(
'Which mode do you want to use?' , _UpperCamelCase , lambda _UpperCamelCase : TORCH_DYNAMO_MODES[int(_UpperCamelCase )] , default='default' , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want the fullgraph mode or is it OK to break the model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE =_ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , )
_SCREAMING_SNAKE_CASE ='Which EC2 instance type do you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
_SCREAMING_SNAKE_CASE =_ask_options(
_UpperCamelCase , _UpperCamelCase , lambda _UpperCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_UpperCamelCase )] )
else:
eca_instance_query += " [ml.p3.2xlarge]:"
_SCREAMING_SNAKE_CASE =_ask_field(_UpperCamelCase , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , default='ml.p3.2xlarge' )
_SCREAMING_SNAKE_CASE =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_SCREAMING_SNAKE_CASE =_ask_field(
'How many machines do you want use? [1]: ' , _UpperCamelCase , default=1 , )
_SCREAMING_SNAKE_CASE =_ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=_UpperCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_UpperCamelCase , use_cpu=_UpperCamelCase , dynamo_config=_UpperCamelCase , eca_instance_type=_UpperCamelCase , profile=_UpperCamelCase , region=_UpperCamelCase , iam_role_name=_UpperCamelCase , mixed_precision=_UpperCamelCase , num_machines=_UpperCamelCase , sagemaker_inputs_file=_UpperCamelCase , sagemaker_metrics_file=_UpperCamelCase , )
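# The questionnaire above leans on small validators such as _convert_yes_no_to_bool;
# a minimal sketch of that idea (the real helper lives in accelerate's config_utils
# and may differ in detail):
def convert_yes_no_to_bool(value: str) -> bool:
    value = value.strip().lower()
    if value in ("yes", "y"):
        return True
    if value in ("no", "n", ""):
        return False
    raise ValueError(f"{value!r} is not a yes/no answer")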
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
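# The _LazyModule above defers heavy imports until an attribute is first touched.
# A stripped-down sketch of the idea (not the real implementation, which also
# handles TYPE_CHECKING, module specs and failure messages):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(module, attr)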
"""simple docstring"""
from collections.abc import Generator
from math import sin
def a__ ( __lowercase ) -> bytes:
if len(__lowercase ) != 32:
raise ValueError("Input must be of length 32" )
_A = B""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def a__ ( __lowercase ) -> bytes:
if i < 0:
raise ValueError("Input must be non-negative" )
_A = format(__lowercase , "08x" )[-8:]
_A = B""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def a__ ( __lowercase ) -> bytes:
_A = B""
for char in message:
bit_string += format(__lowercase , "08b" ).encode("utf-8" )
_A = format(len(__lowercase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__lowercase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def a__ ( __lowercase ) -> Generator[list[int], None, None]:
if len(__lowercase ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__lowercase ) , 512 ):
_A = bit_string[pos : pos + 512]
_A = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def a__ ( __lowercase ) -> int:
if i < 0:
raise ValueError("Input must be non-negative" )
_A = format(__lowercase , "032b" )
_A = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__lowercase , 2 )
def a__ ( __lowercase , __lowercase ) -> int:
return (a + b) % 2**32
def a__ ( __lowercase , __lowercase ) -> int:
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def a__ ( __lowercase ) -> bytes:
_A = preprocess(__lowercase )
_A = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
_A = 0x67_452_301
_A = 0xef_cda_b89
_A = 0x98_bad_cfe
_A = 0x10_325_476
_A = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__lowercase ):
_A = aa
_A = ba
_A = ca
_A = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_A = d ^ (b & (c ^ d))
_A = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_A = c ^ (d & (b ^ c))
_A = (5 * i + 1) % 16
elif i <= 47:
_A = b ^ c ^ d
_A = (3 * i + 5) % 16
else:
_A = c ^ (b | not_aa(__lowercase ))
_A = (7 * i) % 16
_A = (f + a + added_consts[i] + block_words[g]) % 2**32
_A = d
_A = c
_A = b
_A = sum_aa(__lowercase , left_rotate_aa(__lowercase , shift_amounts[i] ) )
# Add hashed chunk to running total
_A = sum_aa(__lowercase , __lowercase )
_A = sum_aa(__lowercase , __lowercase )
_A = sum_aa(__lowercase , __lowercase )
_A = sum_aa(__lowercase , __lowercase )
_A = reformat_hex(__lowercase ) + reformat_hex(__lowercase ) + reformat_hex(__lowercase ) + reformat_hex(__lowercase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
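# Reference digests computed with the standard library; the routine above should
# match them byte-for-byte for the same inputs.
import hashlib

assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"
assert hashlib.md5(b"abc").hexdigest() == "900150983cd24fb0d6963f7d28e17f72"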
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=DummyObject ):
__lowerCamelCase : Dict = ["torch", "scipy"]
def __init__( self , *args , **kwargs ) -> str:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def _snake_case ( cls , *args , **kwargs ) -> Dict:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def _snake_case ( cls , *args , **kwargs ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
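# These dummies let package imports succeed without torch or scipy installed,
# failing only when the class is actually used. A minimal sketch of what a
# requires_backends-style check does (the real helper formats nicer install hints):
import importlib.util


def mini_requires_backends(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires backends that are not installed: {missing}")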
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['YolosFeatureExtractor']
UpperCamelCase__ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
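
# How the lazy pattern above behaves in practice (illustrative): importing the
# package stays cheap, and the heavy torch/vision modules are only imported when
# an attribute such as `YolosModel` is first accessed, e.g.:
#
#     from transformers.models import yolos  # fast, nothing heavy imported yet
#     model_cls = yolos.YolosModel           # triggers the real import via _LazyModule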
| 344
| 0
|
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': f'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
with open(log, '''r''') as f:
for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"{line['duration']:.4f}"
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3000 - offset] + f'\n...\n```\n{err}'
print(f'''### {message}''')
else:
    message = 'No failed tests! 🤗'
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
        action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': f'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
payload.append(action_button)
    date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': f'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''',
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
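
# The thread-reply pattern used above, in isolation (hedged sketch; the channel
# name is a placeholder):
#
#     client = WebClient(token=os.environ['SLACK_API_TOKEN'])
#     root = client.chat_postMessage(channel='#my-ci-channel', text='CI summary')
#     client.chat_postMessage(
#         channel='#my-ci-channel',
#         thread_ts=root.data['ts'],  # replies with this ts land in the thread
#         text='Details for one failing test file',
#     )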
| 309
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
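
# Dependency-free sanity check of the identities used above (illustrative
# addition): on a shared universe, fuzzy union/intersection are elementwise
# max/min of membership grades in [0, 1], and the algebraic sum dominates both.
sample_a = np.array([0.0, 0.3, 0.8, 1.0])
sample_b = np.array([0.2, 0.5, 0.5, 0.0])
assert np.all(np.maximum(sample_a, sample_b) >= np.minimum(sample_a, sample_b))
assert np.all(sample_a + sample_b - sample_a * sample_b >= np.maximum(sample_a, sample_b) - 1e-12)
assert np.all(1 - sample_a >= 0) and np.all(1 - sample_a <= 1)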
| 309
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 298
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels, ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding" )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
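
# The cache-equivalence property exercised by `create_and_check_decoder_model_past`,
# restated in isolation (illustrative helper, not part of the original tests):
# one decoding step fed `past_key_values` must match the tail of a full forward pass.
def check_cache_equivalence(model, input_ids, next_tokens, atol=1e-3):
    outputs = model(input_ids, use_cache=True)
    full = model(torch.cat([input_ids, next_tokens], dim=-1))["last_hidden_state"]
    cached = model(next_tokens, past_key_values=outputs["past_key_values"])["last_hidden_state"]
    return torch.allclose(full[:, -next_tokens.shape[-1] :], cached, atol=atol)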
| 298
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        """simple docstring"""
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        """simple docstring"""
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Union[str, Any] ) -> Iterator[int]:
"""simple docstring"""
yield self.depth_first_search(self.tree )
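
# Usage sketch (illustrative addition): the iterator yields the sum of all
# node values exactly once.
def _node_sum_demo() -> None:
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    assert next(iter(BinaryTreeNodeSum(root))) == 12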
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :List[Any] = ["""image_processor""", """tokenizer"""]
__magic_name__ :Any = """ChineseCLIPImageProcessor"""
__magic_name__ :Any = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
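
# Usage sketch: this processor ships as ChineseCLIPProcessor in transformers;
# the checkpoint name below is illustrative.
#
#     processor = ChineseCLIPProcessor.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16')
#     inputs = processor(text=['一张猫的照片'], images=image, return_tensors='pt')
#     # -> input_ids, attention_mask and pixel_values in a single encoding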
| 293
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(len(vocab_keys ) , 1008 )

    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer( self ):
        '''simple docstring'''
        return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
    def test_picklable_without_disk( self ):
        '''simple docstring'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        '''simple docstring'''
        symbols = 'Hello World!'
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        '''simple docstring'''
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ):
        '''simple docstring'''
        expected_encoding = {
'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='facebook/xglm-564M' , padding=False , )
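
# Round-trip property these tests ultimately rely on (slow sketch, needs the
# real checkpoint; shown for orientation only):
#
#     tok = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
#     ids = tok.encode('Hello World!')
#     assert tok.decode(ids, skip_special_tokens=True) == 'Hello World!'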
| 293
| 1
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """simple docstring"""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f'.{module_name}' , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )

    return None
def get_feature_extractor_config(pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
        return {}

    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple):
'''simple docstring'''
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''')
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get('''feature_extractor_type''' , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
            feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , '''feature_extractor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['''AutoFeatureExtractor''']

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
raise ValueError(
F'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '
F'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}')
    @staticmethod
    def register( config_class , feature_extractor_class ):
        '''simple docstring'''
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
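
# Usage sketch (model id is illustrative): the auto class above resolves the
# concrete feature extractor from the checkpoint's config.
#
#     feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
#     # -> a Wav2Vec2FeatureExtractor instance, found via FEATURE_EXTRACTOR_MAPPING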
| 318
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = ["image_processor", "feature_extractor"]
__UpperCamelCase = "TvltImageProcessor"
__UpperCamelCase = "TvltFeatureExtractor"
    def __init__( self , image_processor , feature_extractor ):
        '''simple docstring'''
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs ):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
@property
    def model_input_names( self ):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 318
| 1
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence , [] , 0 )


def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int) -> None:
    """simple docstring"""
    if index == len(sequence ):
        print(current_subsequence )
        return

    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
__lowerCamelCase : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
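
# For comparison (illustrative addition): the same power set can be produced
# without recursion using itertools, though the enumeration order differs from
# the backtracking version above.
from itertools import combinations


def all_subsequences_iterative(sequence: list[Any]) -> list[list[Any]]:
    return [list(c) for r in range(len(sequence) + 1) for c in combinations(sequence, r)]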
| 219
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
lowerCAmelCase_ = "Salesforce/blip-image-captioning-base"
lowerCAmelCase_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
lowerCAmelCase_ = "image_captioner"
lowerCAmelCase_ = AutoModelForVisionaSeq
lowerCAmelCase_ = ["image"]
lowerCAmelCase_ = ["text"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" ):
        """simple docstring"""
        return self.pre_processor(images=image , return_tensors="""pt""" )

    def forward( self , inputs ):
        """simple docstring"""
        return self.model.generate(**inputs )

    def decode( self , outputs ):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
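
# Usage sketch (requires a vision-enabled install; the image path is a placeholder):
#
#     from PIL import Image
#
#     tool = ImageCaptioningTool()
#     print(tool(Image.open('cat.png')))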
| 219
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
    _import_structure['image_processing_vit'] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit'] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit'] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vit'] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 370
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
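
# Worked example (illustrative) for all_construct('ab', ['a', 'b', 'ab']):
#   table[0] = [[]]                    seed: the empty prefix
#   table[1] = [['a']]                 'a' matches at index 0
#   table[2] = [['a', 'b'], ['ab']]    two ways to build 'ab'
# Each table cell is extended once per matching word, so the number of table
# updates is O(len(target) * len(word_bank)); the output itself can be
# exponentially large.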
| 33
| 0
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
def convert_pegasus(tf_weights: dict , cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path , save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir) / 'pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
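
# Usage sketch (script and checkpoint paths are placeholders):
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc
#
# When save_dir is omitted it defaults to pegasus/<dataset>, where <dataset> is
# the checkpoint's parent directory name.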
| 83
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case_ : Dict = logging.get_logger(__name__)
class lowercase__ ( lowercase ):
    def __init__( self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ):
        '''simple docstring'''
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop('padding_side' , 'right' )
        self.return_attention_mask = kwargs.pop('return_attention_mask' , True )

        super().__init__(**kwargs )
    def pad(
        self ,
        processed_features: Union[
            BatchFeature, List[BatchFeature], Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]],
        ] ,
        padding: Union[bool, str, PaddingStrategy] = True , max_length: Optional[int] = None ,
        truncation: bool = False , pad_to_multiple_of: Optional[int] = None ,
        return_attention_mask: Optional[bool] = None ,
        return_tensors: Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
'''simple docstring'''
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features['attention_mask'] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element ):
                return_tensors = 'pt'
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = 'np'
            else:
                raise ValueError(
                    f'type of {first_element} unknown: {type(first_element )}. '
                    'Should be one of a python, numpy, pytorch or tensorflow object.' )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
# Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError('Some items in the output dictionary have a different batch size than others.' )

        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )

        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad( self , processed_features: Union[Dict[str, np.ndarray], BatchFeature] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_UpperCamelCase : Optional[Any] = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_UpperCamelCase : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_UpperCamelCase : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_UpperCamelCase : Tuple = np.ones(len(lowerCamelCase__ ) ,dtype=np.intaa )
if needs_to_be_padded:
_UpperCamelCase : Dict = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
_UpperCamelCase : Optional[int] = np.pad(
processed_features['attention_mask'] ,(0, difference) )
_UpperCamelCase : Union[str, Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_UpperCamelCase : List[Any] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_UpperCamelCase : List[Any] = np.pad(
processed_features['attention_mask'] ,(difference, 0) )
_UpperCamelCase : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_UpperCamelCase : List[str] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
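
# Added illustration (not part of the original class): a minimal, self-contained sketch
# of the right-side padding `_pad` performs, assuming feature_size == 1, a padding value
# of 0.0, and numpy imported as `np` as in the methods above. `pad_right` is hypothetical.
if __name__ == "__main__":

    def pad_right(seq, max_length, padding_value=0.0):
        attention_mask = np.pad(np.ones(len(seq), dtype=np.int32), (0, max_length - len(seq)))
        padded = np.pad(seq, (0, max_length - len(seq)), "constant", constant_values=padding_value)
        return padded, attention_mask

    padded, mask = pad_right(np.array([1.0, 2.0, 3.0]), 5)
    print(padded)  # [1. 2. 3. 0. 0.]
    print(mask)    # [1 1 1 0 0]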
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    # Return the shape of `tensor`, preferring static dimensions where they are known
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon works around an XLA softmax instability without measurably changing results
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    # Expands 1-dimensional tf.Tensors into 2-dimensional ones, mirroring Keras' internal behavior
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
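
# Added usage sketch (not part of the original module). Because of the relative
# `from .utils import logging` import above, this is illustrative rather than a
# directly runnable standalone script; the calls themselves use only helpers defined here.
if __name__ == "__main__":
    x = tf.random.uniform((2, 3, 4))
    print(shape_list(x))  # [2, 3, 4] -- static dims are used where known
    print(shape_list(flatten(x, start_dim=1)))  # [2, 12], mirroring torch.flatten
    probs = stable_softmax(tf.constant([[1.0, 2.0, 3.0]]))
    print(float(tf.reduce_sum(probs)))  # ~1.0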
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
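    # Added demonstration (not from the original file): encrypt/decrypt are inverses
    # for any grid height smaller than the message length.
    message = "HELLO WORLD"
    for height in range(2, 5):
        assert decrypt(encrypt(message, height), height) == message
    print("round trip ok")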
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
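
# Added note (not in the original test file): when the masks are omitted, the helper
# above derives them from the pad token, so a (config, input_ids, decoder_input_ids)
# triple is enough to build a complete model input dict for the tests below.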
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False,  # use_cache value assumed; the obfuscated source does not preserve it
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,  # dtype assumed; the obfuscated source does not preserve it
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
"""simple docstring"""
import os
def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
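
# Added note (not in the original solution): the three inner loops implement the
# "three ways" dynamic programme of Project Euler problem 82. For each column j the
# first pass assumes a straight move from column j-1, and the downward and upward
# sweeps then allow cheaper vertical detours. For the 2x2 matrix [[1, 9], [5, 1]]
# the cheapest left-to-right path is 5 -> 1, and the recurrence yields 6.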
if __name__ == "__main__":
print(f'{solution() = }')
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM converges in far fewer inference steps than DDPM
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Reversing the denoising loop is only deterministic with DDIM
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
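
# Added usage note (not from the original pipeline): since `slerp` is a @staticmethod
# it can be used on its own to interpolate two noise tensors of the same shape, e.g.
#
#     noise_a = torch.randn(1, 1, 256, 256)
#     noise_b = torch.randn(1, 1, 256, 256)
#     halfway = AudioDiffusionPipeline.slerp(noise_a, noise_b, 0.5)
#
# Spherical interpolation keeps the result near the norm shell of the endpoints,
# which a plain linear mix of Gaussian noise does not.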
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
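
# Added illustration (not in the original script): two concrete renames performed by
# `replace_key`, shown as hypothetical examples:
#
#     "prior.x_out.weight"  -> "prior.fc_proj_out.weight"
#     "prime_state_ln.bias" -> "encoder.final_layer_norm.bias"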
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
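
# Added note (not in the original file): each list element gets its own process, and
# every process runs exactly ten compare-exchange rounds because the demo list in
# main() has ten elements; in general n rounds suffice to sort n values, giving O(n)
# parallel time at the cost of O(n) processes and pipes.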
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
from __future__ import annotations
import requests
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
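# A small, hypothetical retry helper around get_subreddit_data: Reddit answers
# rate-limited clients with HTTP 429, which the function above surfaces as
# requests.HTTPError. The helper's name and backoff schedule are illustrative only.
import time
def get_subreddit_data_with_retry(subreddit: str, retries: int = 3, **kwargs) -> dict:
    for attempt in range(retries):
        try:
            return get_subreddit_data(subreddit, **kwargs)
        except requests.HTTPError:
            time.sleep(2**attempt)  # 1s, 2s, 4s between attempts
    raise requests.HTTPError(f"still rate limited after {retries} attempts")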
| 192
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
    'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox_japanese'] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
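# What the lazy pattern above buys (illustrative sketch, not executed here):
# importing the package does not pull in torch; the heavy modeling module is
# only imported when one of its attributes is first accessed, e.g.
#
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig  # cheap
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseForCausalLM  # triggers the torch import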
| 192
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCamelCase : Tuple = logging.getLogger(__name__)
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__lowerCAmelCase = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__lowerCAmelCase = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
__lowerCAmelCase = field(
default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__lowerCAmelCase = field(default=A__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCAmelCase = field(
default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowerCAmelCase = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
__lowerCAmelCase = field(
default=A__ , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
__lowerCAmelCase = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__lowerCAmelCase = field(
default=A__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main ( ) ->Optional[Any]:
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
A = import_module("""tasks""" )
try:
A = getattr(UpperCAmelCase , model_args.task_type )
A = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_process_zero():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value )
                    writer.write("""%s = %s\n""" % (key, value) )
            results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , """test_results.txt""" )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , """w""" ) as writer:
                for key, value in metrics.items():
                    logger.info(""" %s = %s""" , key , value )
                    writer.write("""%s = %s\n""" % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , """test_predictions.txt""" )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , """w""" ) as writer:
                with open(os.path.join(data_args.data_dir , """test.txt""" ) , """r""" ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn ( index ) ->int:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
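# Toy check of the align_predictions logic above (data invented for the demo):
# positions whose label id equals CrossEntropyLoss().ignore_index (-100) are
# dropped before seqeval scoring.
_toy_label_map = {0: "O", 1: "B-LOC"}
_toy_preds = np.argmax(np.array([[[0.9, 0.1], [0.2, 0.8], [0.9, 0.1]]]), axis=2)  # -> [[0, 1, 0]]
_toy_label_ids = np.array([[0, 1, nn.CrossEntropyLoss().ignore_index]])
_kept = [
    _toy_label_map[_toy_preds[0][j]]
    for j in range(3)
    if _toy_label_ids[0, j] != nn.CrossEntropyLoss().ignore_index
]
assert _kept == ["O", "B-LOC"]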
| 337
|
'''simple docstring'''
from __future__ import annotations
def encode ( plain: str ) ->list[int]:
    """simple docstring"""
    return [ord(elem ) - 96 for elem in plain]
def decode ( encoded: list[int] ) ->str:
    """simple docstring"""
    return "".join(chr(elem + 96 ) for elem in encoded )
def main ( ) ->None:
    """simple docstring"""
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
main()
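# Worked example of the A1Z26 mapping implemented above:
# 'a' -> ord('a') - 96 = 1, ..., 'z' -> 26, and decode inverts it.
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"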
| 337
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput ( ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig ( XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation ( RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [R'''pooler''', R'''logit_scale''']
    _keys_to_ignore_on_load_missing = [R'''position_ids''', R'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig
    def __init__( self , config ) -> None:
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , """has_pre_transformation""" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output2 = outputs["""hidden_states"""][-2]
            sequence_output2 = self.pre_LN(sequence_output2 )
            projection_state2 = self.transformation_pre(sequence_output2 )
            return TransformationModelOutput(
                projection_state=projection_state2 , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
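# Minimal shape check for the model above. All sizes are tiny, made-up values
# chosen only so the projection from hidden_size to project_dim can be
# verified offline; they are not the configuration of any released checkpoint.
if __name__ == "__main__":
    _cfg = RobertaSeriesConfig(
        vocab_size=100 , hidden_size=32 , num_hidden_layers=1 , num_attention_heads=2 , intermediate_size=64 , project_dim=16 )
    _model = RobertaSeriesModelWithTransformation(_cfg ).eval()
    with torch.no_grad():
        _out = _model(input_ids=torch.tensor([[1, 2, 3]] ) )
    assert _out.projection_state.shape == (1, 3, 16)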
| 58
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_A : str = logging.get_logger(__name__)
class SegformerFeatureExtractor ( SegformerImageProcessor ):
    def __init__( self , *args : Any , **kwargs : List[Any] ) ->None:
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 142
| 0
|
import math
def sieve ( n : int ) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n ) ) # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(10**6))
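# Quick cross-checks for the segmented sieve above: pi(100) = 25 and the
# primes below 30 are the usual ten.
assert len(sieve(100)) == 25
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]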
| 238
|
class DisjointSet :
    def __init__(self : str , set_counts : list ):
        """simple docstring"""
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge (self : str , src : int , dst : int ):
        """simple docstring"""
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent (self : Union[str, Any] , disj_set : int ):
        """simple docstring"""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
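# Usage example for the class above: three sets of sizes 1, 1 and 2. Merging
# the two singletons produces a set of size 2, so max_set stays 2, and a
# second merge of the same pair is a no-op.
ds = DisjointSet([1, 1, 2])
assert ds.merge(0, 1) is True
assert ds.get_parent(0) == ds.get_parent(1)
assert ds.max_set == 2
assert ds.merge(0, 1) is False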
| 238
| 1
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__( self ) -> None:
        self.node_position = []
    def get_position( self , vertex ) -> int:
        return self.node_position[vertex]
    def set_position( self , vertex , pos ) -> None:
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ) -> None:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start]))
                self.set_position(positions[start] , temp)
                self.top_to_bottom(heap , smallest_child , size , positions)
    def bottom_to_top( self , val , index , heap , position ) -> None:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0)
    def heapify( self , heap , positions ) -> None:
        start = len(heap) // 2 - 1
        for i in range(start , -1 , -1):
            self.top_to_bottom(heap , i , len(heap) , positions)
    def delete_minimum( self , heap , positions ) -> int:
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap) , positions)
        return temp
def prisms_algorithm (adjacency_list ) -> list:
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list ) # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
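    # Worked example (non-interactive): a triangle 0-1 (weight 1), 1-2 (weight 2),
    # 0-2 (weight 3). Prim's algorithm keeps the two cheapest edges.
    _demo = defaultdict(list)
    for _u, _v, _w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        _demo[_u].append([_v, _w])
        _demo[_v].append([_u, _w])
    assert sorted(prisms_algorithm(_demo)) == [(0, 1), (1, 2)]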
| 91
|
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCAmelCase = """CompVis/stable-diffusion-v1-1"""
UpperCAmelCase = """CompVis/stable-diffusion-v1-2"""
UpperCAmelCase = """CompVis/stable-diffusion-v1-3"""
UpperCAmelCase = """CompVis/stable-diffusion-v1-4"""
class StableDiffusionComparisonPipeline ( DiffusionPipeline):
def __init__( self : Tuple , __UpperCamelCase : AutoencoderKL , __UpperCamelCase : CLIPTextModel , __UpperCamelCase : CLIPTokenizer , __UpperCamelCase : UNetaDConditionModel , __UpperCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase : StableDiffusionSafetyChecker , __UpperCamelCase : CLIPImageProcessor , __UpperCamelCase : bool = True , ) -> int:
        super().__init__()
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase )
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase )
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase )
_UpperCamelCase = StableDiffusionPipeline(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , requires_safety_checker=__UpperCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _UpperCamelCase ( self : List[Any] ) -> Dict[str, Any]:
return {k: getattr(self , __UpperCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : Optional[Union[str, int]] = "auto" ) -> str:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def _UpperCamelCase ( self : int ) -> Tuple:
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Dict , ) -> int:
return self.pipea(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : List[Any] , ) -> int:
return self.pipea(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Any , ) -> Any:
return self.pipea(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self : Any , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Optional[Any] , ) -> Union[str, Any]:
return self.pipea(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self : Any , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : List[str] , ) -> Optional[Any]:
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
_UpperCamelCase = self.textaimg_sda_a(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
_UpperCamelCase = self.textaimg_sda_a(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
_UpperCamelCase = self.textaimg_sda_a(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
_UpperCamelCase = self.textaimg_sda_a(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
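# Hedged usage sketch (not executed here): assuming this file ships as the
# diffusers community pipeline "stable_diffusion_comparison", it would be
# loaded roughly as
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe(prompt="an astronaut riding a horse")  # one image per v1.x checkpoint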
| 256
| 0
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
_lowerCAmelCase :Any = logging.getLogger(__name__)
def save_model (model , dirpath ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , '''config.json''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''config.json''' ) ):
            os.remove(os.path.join(dirpath , '''config.json''' ) )
        if os.path.exists(os.path.join(dirpath , '''pytorch_model.bin''' ) ) and os.path.isfile(
            os.path.join(dirpath , '''pytorch_model.bin''' ) ):
            os.remove(os.path.join(dirpath , '''pytorch_model.bin''' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy (p , unlogit=False ):
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
def print_2d_tensor (tensor ):
    logger.info('''lv, h >\t''' + '''\t'''.join(F'{x + 1}' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:.5f}' for x in tensor[row].cpu().data ) )
        else:
            logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:d}' for x in tensor[row].cpu().data ) )
def compute_heads_importance (args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        ((input_ids) , ) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        ) # Loss and logits are the first, attention the last
        loss.backward() # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''' )
        print_2d_tensor(attn_entropy )
    if compute_importance:
        logger.info('''Head importance scores''' )
        print_2d_tensor(head_importance )
    logger.info('''Head ranked by importance scores''' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_2d_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
def mask_heads (args , model , eval_dataloader ):
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach() # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('''BREAK BY num_to_mask''' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info('''Final head mask''' )
    print_2d_tensor(head_mask )
    np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads (args , model , eval_dataloader , head_mask ):
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , score_masking , score_pruning )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main ():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    # Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=str , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=None , type=str , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=int , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=float , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=float , help='''Amount to heads to masking at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=str , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=int , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=int , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=int , default=42 )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('''cuda''' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
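# Quick numeric check of the entropy helper above: a uniform attention row of
# length 4 has entropy ln(4) (natural log, matching torch.log).
_p = torch.full((1, 4), 0.25)
assert torch.allclose(entropy(_p), torch.log(torch.tensor(4.0)))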
| 68
|
"""simple docstring"""
from itertools import count
def lowerCamelCase_ (UpperCamelCase__ : int = 50 ):
_UpperCAmelCase : Tuple = [1] * min_block_length
for n in count(UpperCamelCase__ ):
fill_count_functions.append(1 )
for block_length in range(UpperCamelCase__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(f"{solution() = }")
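# Sanity check of the recurrence above against the known Project Euler 114/115
# example: a 7-unit row with minimum block length 3 admits exactly 17 fillings.
# The helper duplicates the fill-count logic so it can be tested for a fixed n.
def _fill_count(min_block_length: int, n: int) -> int:
    f = [1] * min_block_length
    for m in range(min_block_length, n + 1):
        f.append(1)
        for block_length in range(min_block_length, m + 1):
            for block_start in range(m - block_length):
                f[m] += f[m - block_start - block_length - 1]
            f[m] += 1
    return f[n]
assert _fill_count(3, 7) == 17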
| 68
| 1
|
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowercase : Dict = logging.get_logger(__name__)
def normalize_box ( box , width , height ) -> list:
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def apply_tesseract ( image , lang , tesseract_config = None ) -> Any:
    tesseract_config = tesseract_config if tesseract_config is not None else ''
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='dict' , config=tesseract_config )
    words , left , top , width , height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
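# Worked example for normalize_box: a 50x20 box at (10, 20) inside a 200x100
# image maps into the 0-1000 coordinate space LayoutLM expects.
assert normalize_box([10, 20, 60, 40], 200, 100) == [50, 200, 300, 400]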
class LayoutLMv2ImageProcessor ( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 2_24, 'width': 2_24}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize ( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
        output_size = (size['height'], size['width'])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def preprocess ( self , images , do_resize = None , size = None , resample = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self , 'pytesseract' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={'pixel_values': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
| 42
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file ( fname , version , pattern ):
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def update_version_in_examples ( version ):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update ( version , patch=False ):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list ( ):
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def get_version ( ):
    with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work ( patch=False ):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version , patch=patch )
    if not patch:
        print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
        clean_main_ref_in_model_list()
def lowercase ( ):
lowercase_ : List[Any] = get_version()
lowercase_ : List[str] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowercase_ : Any = current_version.base_version
# Check with the user we got that right.
lowercase_ : Tuple = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__snake_case ) == 0:
lowercase_ : str = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__A : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
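# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# The script is driven from the command line; assuming it is saved as
# `utils/release.py` at the repo root, typical invocations would be:
#
#   python utils/release.py                   # pre-release: bump to X.(Y+1).0
#   python utils/release.py --patch           # patch release: bump to X.Y.(Z+1)
#   python utils/release.py --post_release    # back to X.(Y+1).0.dev0 afterwards
# ---------------------------------------------------------------------------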
| 33
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
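# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only). Assuming the classes above are
# importable, a config round-trips through the standard PretrainedConfig API:
#
#   config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#   assert config.model_type == "mobilenet_v2"
#   restored = MobileNetV2Config.from_dict(config.to_dict())
#   assert restored.depth_multiplier == 1.4
# ---------------------------------------------------------------------------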
| 57
|
"""simple docstring"""
_deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
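# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; the helper below is an assumption
# about how a setup.py might consume this table, not part of the original file):
#
#   def deps_list(*pkgs: str) -> list[str]:
#       # Map bare package names to their pinned requirement strings.
#       return [_deps[pkg] for pkg in pkgs]
#
#   install_requires = deps_list("numpy", "requests", "huggingface-hub")
# ---------------------------------------------------------------------------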
| 57
| 1
|
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the number of "holes" needed.
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Do the sorting: drop every value into its hole and count repeats.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Make the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
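# ---------------------------------------------------------------------------
# Note added for illustration: pigeonhole sort runs in O(n + k) time and O(k)
# extra space, where k = _max - _min + 1 is the value range, so it is only
# attractive when the range is close to the input size. Quick sanity check:
#
#   assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]
# ---------------------------------------------------------------------------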
| 343
|
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 343
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
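# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; requires `transformers`):
#
#   config = GPTJConfig(n_layer=2, n_embd=256, n_head=8)   # tiny config
#   onnx_config = GPTJOnnxConfig(config, use_past=True)
#   print(onnx_config.inputs)   # declares past key/values next to input_ids
# ---------------------------------------------------------------------------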
| 369
|
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 245
| 0
|
"""Testing suite for the PyTorch Swinv2 model."""
import collections
import inspect
import unittest

from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
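# ---------------------------------------------------------------------------
# Note added for illustration: assuming the usual transformers repo layout,
# this module's tests can be run selectively with pytest, e.g.:
#
#   python -m pytest tests/models/swinv2/test_modeling_swinv2.py -k hidden_states
# ---------------------------------------------------------------------------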
| 58
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes using native compression if possible, otherwise PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dest_dtype = None
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
        )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an Image extension array."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
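# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; requires `datasets` and Pillow):
#
#   import numpy as np
#   feature = Image()
#   encoded = feature.encode_example(np.zeros((8, 8, 3), dtype=np.uint8))
#   assert encoded["bytes"] is not None and encoded["path"] is None
#   pil_image = feature.decode_example(encoded)  # back to a PIL.Image.Image
# ---------------------------------------------------------------------------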
| 58
| 1
|
"""Tokenization classes for Speech2Text."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code]."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by adding the prefix tokens and eos."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
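# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; checkpoint name taken from the
# pretrained map above, network access required):
#
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tok("hello world").input_ids
#   print(tok.decode(ids, skip_special_tokens=True))
# ---------------------------------------------------------------------------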
| 352
|
"""Check whether a string can be rearranged to form a palindrome."""
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Return True if at most one character of `input_str` has odd frequency (spaces and case ignored)."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check as above, implemented with an explicit frequency dictionary."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark code comparing the two implementations."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 219
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
'''simple docstring'''
def __init__( self: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Dict=3 , _SCREAMING_SNAKE_CASE: List[str]=32 , _SCREAMING_SNAKE_CASE: List[str]=3 , _SCREAMING_SNAKE_CASE: Dict=10 , _SCREAMING_SNAKE_CASE: List[Any]=[8, 16, 32, 64] , _SCREAMING_SNAKE_CASE: str=[1, 1, 2, 1] , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: int="relu" , _SCREAMING_SNAKE_CASE: str=3 , _SCREAMING_SNAKE_CASE: List[str]=None , _SCREAMING_SNAKE_CASE: Union[str, Any]=["stage2", "stage3", "stage4"] , _SCREAMING_SNAKE_CASE: List[str]=[2, 3, 4] , _SCREAMING_SNAKE_CASE: str=1 , ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Dict = parent
__lowerCAmelCase : List[Any] = batch_size
__lowerCAmelCase : Optional[Any] = image_size
__lowerCAmelCase : List[Any] = num_channels
__lowerCAmelCase : int = embeddings_size
__lowerCAmelCase : Tuple = hidden_sizes
__lowerCAmelCase : List[Any] = depths
__lowerCAmelCase : int = is_training
__lowerCAmelCase : int = use_labels
__lowerCAmelCase : List[Any] = hidden_act
__lowerCAmelCase : List[Any] = num_labels
__lowerCAmelCase : Optional[Any] = scope
__lowerCAmelCase : int = len(_UpperCAmelCase)
__lowerCAmelCase : Optional[int] = out_features
__lowerCAmelCase : Dict = out_indices
__lowerCAmelCase : Dict = num_groups
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCAmelCase : str = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels)
__lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Dict = BitModel(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__lowerCAmelCase : int = model(_UpperCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.num_labels
__lowerCAmelCase : List[Any] = BitForImageClassification(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__lowerCAmelCase : Tuple = model(_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Any) -> Any:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = BitBackbone(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__lowerCAmelCase : Union[str, Any] = model(_UpperCAmelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Optional[Any] = BitBackbone(config=_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
__lowerCAmelCase : Optional[Any] = model(_UpperCAmelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
__lowerCAmelCase : List[str] = config_and_inputs
__lowerCAmelCase : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = BitModelTester(self)
__lowerCAmelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
return
@unittest.skip(reason="Bit does not output attentions")
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self: int) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings")
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Tuple = model_class(_UpperCAmelCase)
__lowerCAmelCase : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__lowerCAmelCase : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Union[str, Any] = model_class(config=_UpperCAmelCase)
for name, module in model.named_modules():
if isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="Bit does not use feedforward chunking")
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> str:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase)
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
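

# A minimal standalone sketch of the inference flow exercised by BitModelIntegrationTest above.
# The checkpoint name "google/bit-50" is an assumption for illustration (the tests only rely on
# BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]); running this needs network access and torch installed.
if __name__ == "__main__":
    from transformers import BitImageProcessor as _BitImageProcessor

    _processor = _BitImageProcessor.from_pretrained("google/bit-50")
    _model = BitForImageClassification.from_pretrained("google/bit-50")
    _inputs = _processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        _logits = _model(**_inputs).logits
    # id2label maps the argmax index to a human-readable ImageNet class name
    print(_model.config.id2label[_logits.argmax(-1).item()])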
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
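

# Illustrative sketch (not part of the test suite): how CLIPProcessor is typically used to build
# joint text+image inputs. The checkpoint name "openai/clip-vit-base-patch32" is assumed here for
# illustration and requires network access.
if __name__ == "__main__":
    from transformers import CLIPProcessor as _CLIPProcessor

    _processor = _CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    _image = Image.fromarray(np.moveaxis(np.random.randint(255, size=(3, 30, 400), dtype=np.uint8), 0, -1))
    _batch = _processor(text=["a photo of a cat", "a photo of a dog"], images=_image, return_tensors="pt", padding=True)
    # One entry per model input: input_ids/attention_mask from the tokenizer, pixel_values from the image processor
    print(sorted(_batch.keys()))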
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
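

# Illustrative sketch (not part of the tests): the TextIteratorStreamer pattern the tests above
# verify, as it would appear in an application -- generation runs in a background thread while the
# main thread consumes decoded text chunks as they become available.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer(["A sequence:"], return_tensors="pt")

    streamer = TextIteratorStreamer(tokenizer)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    for chunk in streamer:  # blocks until the next decoded chunk (or the end of generation)
        print(chunk, end="", flush=True)
    thread.join()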
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
_A = """base_with_context"""
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )

        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))

    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
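
# Example invocation (paths are illustrative, not part of the original script):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram-diffusion
#
# With --save left at its default (True), the converted SpectrogramDiffusionPipeline is written to
# --output_path and can then be reloaded with SpectrogramDiffusionPipeline.from_pretrained(...).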
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
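

# Quick usage sketch (an illustration assuming transformers is installed; not part of the original
# module): the config can be built standalone, and the ONNX config reports the dynamic axes above.
if __name__ == "__main__":
    config = Data2VecTextConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    print(config.model_type)  # "data2vec-text"
    onnx_config = Data2VecTextOnnxConfig(config)
    print(dict(onnx_config.inputs))  # input_ids / attention_mask, each with batch and sequence axes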
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
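
# Behavior note (illustration, not part of the module): with the _LazyModule indirection above,
# `import transformers.models.blip_2` stays cheap -- heavy submodules are only imported when an
# attribute listed in _import_structure is first accessed, e.g.:
#
#   from transformers.models.blip_2 import Blip2Processor  # imports processing_blip_2 only
#   from transformers.models.blip_2 import Blip2Model      # imports modeling_blip_2 (needs torch)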
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
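

# Shape sanity note (illustration, not part of the tests): with the tester defaults above
# (batch_size=7, num_frames=10, num_channels=3, crop_size 18x18), the batched pixel_values that
# the three test_call_* methods check is a 5-D tensor:
#
#   (batch_size, num_frames, num_channels, crop_height, crop_width) == (7, 10, 3, 18, 18)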
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
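
# Example invocation (dataset/model names are illustrative, not part of the original script):
#
#   python eval.py \
#       --model_id jonatasgrosman/wav2vec2-large-xlsr-53-english \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en --split test --log_outputs
#
# This writes a "<dataset>_<config>_<split>_eval_results.txt" file with the WER/CER computed in
# log_results, plus prediction/target logs when --log_outputs is passed.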
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximates the arc length of fnc between x_start and x_end using `steps` straight segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
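
    # Sanity check (not part of the original demo): for the straight line f(x) = x on [0, 1],
    # each segment contributes hypot(dx, dx) = dx * sqrt(2), so the sum is sqrt(2) for any step count.
    assert abs(line_length(lambda x: x, 0, 1, 100) - math.sqrt(2)) < 1e-9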
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
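# Illustrative sketch (not part of the test class above): the tests pass seeded
# torch.Generator objects so random latents are bit-for-bit reproducible; MPS
# lacks device-local generators, hence the CPU fallback mirrored here.
import torch


def make_generator(device: str, seed: int) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # falls back to the global CPU generator
    return torch.Generator(device=device).manual_seed(seed)


if __name__ == "__main__":
    gen_a = make_generator("cpu", 0)
    gen_b = make_generator("cpu", 0)
    assert torch.equal(torch.randn(2, 3, generator=gen_a), torch.randn(2, 3, generator=gen_b))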
| 313
| 1
|
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_a : Optional[int] = datasets.logging.get_logger(__name__)
_a : Optional[Any] = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_a : List[Any] = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_a : Optional[int] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def _lowerCAmelCase ( lowercase , lowercase , lowercase=False , lowercase=False , lowercase=True , lowercase=False , lowercase="dummy_doc" ) -> List[str]:
__lowerCAmelCase = {doc: key_lines}
__lowerCAmelCase = {doc: sys_lines}
__lowerCAmelCase = {}
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(lowercase , key_doc_lines[doc] , lowercase )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowerCAmelCase = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
__lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(lowercase , sys_doc_lines[doc] , lowercase )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowerCAmelCase = reader.set_annotated_parse_trees(lowercase , key_doc_lines[doc] , lowercase , lowercase )
if remove_nested:
__lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(lowercase , lowercase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(lowercase , lowercase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowerCAmelCase = reader.get_mention_assignments(lowercase , lowercase )
__lowerCAmelCase = reader.get_mention_assignments(lowercase , lowercase )
__lowerCAmelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
"""Number of resulting singleton clusters in the key """
f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
"""files, respectively""" )
return doc_coref_infos
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
__lowerCAmelCase = get_coref_infos(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
__lowerCAmelCase = {}
__lowerCAmelCase = 0
__lowerCAmelCase = 0
for name, metric in metrics:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = evaluator.evaluate_documents(lowercase , lowercase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} )
logger.info(
name.ljust(10 ) , f'Recall: {recall * 100:.2f}' , f' Precision: {precision * 100:.2f}' , f' F1: {fa * 100:.2f}' , )
if conll_subparts_num == 3:
__lowerCAmelCase = (conll / 3) * 100
logger.info(f'CoNLL score: {conll:.2f}' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _lowerCAmelCase ( lowercase ) -> Optional[Any]:
__lowerCAmelCase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowerCAmelCase = line.split()[5]
if not parse_col == "-":
__lowerCAmelCase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ),codebase_urls=["""https://github.com/ns-moosavi/coval"""],reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowerCAmelCase = util.check_gold_parse_annotation(__SCREAMING_SNAKE_CASE )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowerCAmelCase = evaluate(
key_lines=__SCREAMING_SNAKE_CASE,sys_lines=__SCREAMING_SNAKE_CASE,metrics=__SCREAMING_SNAKE_CASE,NP_only=__SCREAMING_SNAKE_CASE,remove_nested=__SCREAMING_SNAKE_CASE,keep_singletons=__SCREAMING_SNAKE_CASE,min_span=__SCREAMING_SNAKE_CASE,)
return score
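# Illustrative sketch (values assumed, not produced by the metric above): the CoNLL
# score is the plain average of the MUC, B-cubed and CEAFe F1 values scaled to a
# percentage, mirroring the `conll_subparts_num == 3` branch in `evaluate`.
def conll_average(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100


if __name__ == "__main__":
    assert conll_average(1.0, 1.0, 1.0) == 100.0  # perfect scores on all three metrics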
| 46
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =KandinskyVaaImgaImgPipeline
a : List[Any] =["""image_embeds""", """negative_image_embeds""", """image"""]
a : Union[str, Any] =[
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
a : Optional[int] =[
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a : Optional[Any] =False
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 1_00
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__lowerCAmelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__lowerCAmelCase = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = image.cpu().permute(0,2,3,1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ),return_dict=__SCREAMING_SNAKE_CASE,)[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__lowerCAmelCase = """A red cartoon frog, 4k"""
__lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""",torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""",torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=5,negative_prompt="""""",).to_tuple()
__lowerCAmelCase = pipeline(
image=__SCREAMING_SNAKE_CASE,image_embeds=__SCREAMING_SNAKE_CASE,negative_image_embeds=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,height=7_68,width=7_68,strength=0.2,output_type="""np""",)
__lowerCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
| 46
| 1
|
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
a_ : Union[str, Any] = getLogger(__name__)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 8 , _UpperCAmelCase = 1024 , _UpperCAmelCase="val" , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase="summarization" , _UpperCAmelCase=None , _UpperCAmelCase=1 , _UpperCAmelCase = None , _UpperCAmelCase="" , **_UpperCAmelCase , ):
SCREAMING_SNAKE_CASE = str(_UpperCAmelCase)
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = Path(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = save_dir.joinpath(F'''rank_{local_rank}_output.json''')
torch.cuda.set_device(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(_UpperCAmelCase).cuda()
if fpaa:
SCREAMING_SNAKE_CASE = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_UpperCAmelCase , _UpperCAmelCase) # update config with task specific params
SCREAMING_SNAKE_CASE = generate_kwargs.pop('num_beams' , model.config.num_beams) # AttributeError risk?
if num_return_sequences > num_beams:
SCREAMING_SNAKE_CASE = num_return_sequences
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_UpperCAmelCase)
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''') # if this is wrong, check config.model_type.
if max_source_length is None:
SCREAMING_SNAKE_CASE = tokenizer.model_max_length
if prefix is None:
SCREAMING_SNAKE_CASE = prefix or getattr(model.config , 'prefix' , '') or ''
SCREAMING_SNAKE_CASE = SeqaSeqDataset(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , max_target_length=1024 , type_path=_UpperCAmelCase , n_obs=_UpperCAmelCase , prefix=_UpperCAmelCase , **_UpperCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
SCREAMING_SNAKE_CASE = ds.make_sortish_sampler(_UpperCAmelCase , distributed=_UpperCAmelCase , add_extra_examples=_UpperCAmelCase , shuffle=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=ds.collate_fn)
SCREAMING_SNAKE_CASE = []
for batch in tqdm(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = model.generate(
input_ids=batch['input_ids'].to(model.device) , attention_mask=batch['attention_mask'].to(model.device) , num_return_sequences=_UpperCAmelCase , num_beams=_UpperCAmelCase , **_UpperCAmelCase , )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = batch['ids']
if num_return_sequences > 1:
SCREAMING_SNAKE_CASE = chunks(_UpperCAmelCase , _UpperCAmelCase) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_UpperCAmelCase):
results.append({'pred': pred, 'id': ids[i].item()})
save_json(_UpperCAmelCase , _UpperCAmelCase)
return results, sampler.num_replicas
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate')
parser.add_argument('--data_dir' , type=_UpperCAmelCase , help='like cnn_dm/test.source')
parser.add_argument(
'--model_name' , type=_UpperCAmelCase , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=_UpperCAmelCase , help='where to save' , default='tmp_gen')
parser.add_argument('--max_source_length' , type=_UpperCAmelCase , default=_UpperCAmelCase)
parser.add_argument(
'--type_path' , type=_UpperCAmelCase , default='test' , help='which subset to evaluate typically train/val/test')
parser.add_argument('--task' , type=_UpperCAmelCase , default='summarization' , help='used for task_specific_params + metrics')
parser.add_argument('--bs' , type=_UpperCAmelCase , default=8 , required=_UpperCAmelCase , help='batch size')
parser.add_argument(
'--local_rank' , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help='should be passed by distributed.launch')
parser.add_argument(
'--n_obs' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='How many observations. Defaults to all.')
parser.add_argument(
'--num_return_sequences' , type=_UpperCAmelCase , default=1 , required=_UpperCAmelCase , help='How many sequences to return')
parser.add_argument(
'--sync_timeout' , type=_UpperCAmelCase , default=600 , required=_UpperCAmelCase , help='How long should master process wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase)
parser.add_argument('--tgt_lang' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase)
parser.add_argument(
'--prefix' , type=_UpperCAmelCase , required=_UpperCAmelCase , default=_UpperCAmelCase , help='will be added to the beginning of src examples')
parser.add_argument('--fp16' , action='store_true')
parser.add_argument('--debug' , action='store_true')
SCREAMING_SNAKE_CASE = time.time()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_known_args()
SCREAMING_SNAKE_CASE = parse_numeric_n_bool_cl_kwargs(_UpperCAmelCase)
if generate_kwargs and args.local_rank <= 0:
print(F'''parsed the following generate kwargs: {generate_kwargs}''')
SCREAMING_SNAKE_CASE = Path(args.save_dir + '_tmp')
Path(_UpperCAmelCase).mkdir(exist_ok=_UpperCAmelCase) # this handles locking.
SCREAMING_SNAKE_CASE = list(json_save_dir.glob('rank_*.json'))
if intermediate_files:
raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''')
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
SCREAMING_SNAKE_CASE = {}
if args.src_lang is not None:
SCREAMING_SNAKE_CASE = args.src_lang
if args.tgt_lang is not None:
SCREAMING_SNAKE_CASE = args.tgt_lang
Path(args.save_dir).mkdir(exist_ok=_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = eval_data_dir(
args.data_dir , _UpperCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_UpperCAmelCase , **_UpperCAmelCase , )
if args.local_rank <= 0:
SCREAMING_SNAKE_CASE = Path(args.save_dir)
save_dir.mkdir(exist_ok=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = gather_results_from_each_node(_UpperCAmelCase , _UpperCAmelCase , args.sync_timeout)
SCREAMING_SNAKE_CASE = combine_partial_results(_UpperCAmelCase)
if args.num_return_sequences > 1:
SCREAMING_SNAKE_CASE = save_dir.joinpath('pseudolabel_results.json')
print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''')
save_json(_UpperCAmelCase , _UpperCAmelCase)
return
SCREAMING_SNAKE_CASE = Path(args.data_dir).joinpath(args.type_path + '.target')
with open(_UpperCAmelCase) as f:
SCREAMING_SNAKE_CASE = [x.rstrip() for x in f.readlines()][: len(_UpperCAmelCase)]
# Calculate metrics, save metrics, and save _generations.txt
SCREAMING_SNAKE_CASE = 'translation' in args.task
SCREAMING_SNAKE_CASE = calculate_bleu if calc_bleu else calculate_rouge
SCREAMING_SNAKE_CASE = 'bleu' if calc_bleu else 'rouge'
SCREAMING_SNAKE_CASE = score_fn(_UpperCAmelCase , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = time.time() - start_time
SCREAMING_SNAKE_CASE = round(runtime / metrics['n_obs'] , 4)
SCREAMING_SNAKE_CASE = num_replicas
# TODO(@stas00): add whatever metadata to metrics
SCREAMING_SNAKE_CASE = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''')
save_json(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase)
print(_UpperCAmelCase)
write_txt_file(_UpperCAmelCase , save_dir.joinpath(F'''{args.type_path}_generations.txt'''))
if args.debug:
write_txt_file(_UpperCAmelCase , save_dir.joinpath(F'''{args.type_path}.target'''))
else:
shutil.rmtree(_UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = []
for partial_result in partial_results:
records.extend(_UpperCAmelCase)
    SCREAMING_SNAKE_CASE = sorted(_UpperCAmelCase , key=lambda x: x["id"])
SCREAMING_SNAKE_CASE = [x['pred'] for x in records]
return preds
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
# WAIT FOR lots of .json files
SCREAMING_SNAKE_CASE = time.time()
logger.info('waiting for all nodes to finish')
SCREAMING_SNAKE_CASE = None
while (time.time() - start_wait) < timeout:
SCREAMING_SNAKE_CASE = list(save_dir.glob('rank_*.json'))
if len(_UpperCAmelCase) < num_replicas:
continue
try:
# make sure all json files are fully saved
SCREAMING_SNAKE_CASE = lmap(_UpperCAmelCase , _UpperCAmelCase)
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes')
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
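# A small sketch of the num_return_sequences handling above: when the model emits
# k sequences per input, the flat decode list is regrouped into per-input chunks.
# The helper and data below are illustrative stand-ins for `chunks` from utils.
def group_by_input(flat: list, k: int) -> list:
    return [flat[i : i + k] for i in range(0, len(flat), k)]


if __name__ == "__main__":
    decoded = ["a1", "a2", "b1", "b2"]  # 2 inputs x 2 return sequences
    assert group_by_input(decoded, 2) == [["a1", "a2"], ["b1", "b2"]]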
| 137
|
from math import factorial


def solution(num: int = 100) -> int:
    """Returns the sum of the digits of num factorial."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
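    # Worked example (a sketch, not part of the original): 10! = 3628800, whose
    # digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
    assert solution(10) == 27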
| 137
| 1
|
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Union[str, Any] = """EncodecFeatureExtractor"""
_lowerCamelCase : Tuple = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : List[str] , snake_case_ : Tuple , snake_case_ : int ):
super().__init__(snake_case_ , snake_case_ )
_UpperCAmelCase = self.feature_extractor
_UpperCAmelCase = False
def lowercase ( self : Any , snake_case_ : Any=None , snake_case_ : Union[str, Any]=None , snake_case_ : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=snake_case_ , language=snake_case_ , no_timestamps=snake_case_ )
def __call__( self : Dict , *snake_case_ : Tuple , **snake_case_ : Dict ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*snake_case_ , **snake_case_ )
_UpperCAmelCase = kwargs.pop("audio" , snake_case_ )
_UpperCAmelCase = kwargs.pop("sampling_rate" , snake_case_ )
_UpperCAmelCase = kwargs.pop("text" , snake_case_ )
if len(snake_case_ ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
_UpperCAmelCase = self.tokenizer(snake_case_ , **snake_case_ )
if audio is not None:
_UpperCAmelCase = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_UpperCAmelCase = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
_UpperCAmelCase = audio_inputs["padding_mask"]
return inputs
def lowercase ( self : int , *snake_case_ : Optional[int] , **snake_case_ : Any ):
_UpperCAmelCase = kwargs.pop("audio" , snake_case_ )
_UpperCAmelCase = kwargs.pop("padding_mask" , snake_case_ )
if len(snake_case_ ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(snake_case_ , padding_mask=snake_case_ )
else:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowercase ( self : Union[str, Any] , *snake_case_ : Dict , **snake_case_ : int ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
def lowercase ( self : str , snake_case_ : Union[str, Any] , snake_case_ : Optional = None ):
_UpperCAmelCase = to_numpy(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = audio_values.shape
if padding_mask is None:
return list(snake_case_ )
_UpperCAmelCase = to_numpy(snake_case_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_UpperCAmelCase = seq_len - padding_mask.shape[-1]
_UpperCAmelCase = 1 - self.feature_extractor.padding_value
_UpperCAmelCase = np.pad(snake_case_ , ((0, 0), (0, difference)) , "constant" , constant_values=snake_case_ )
_UpperCAmelCase = audio_values.tolist()
for i in range(snake_case_ ):
_UpperCAmelCase = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_UpperCAmelCase = sliced_audio.reshape(snake_case_ , -1 )
return audio_values
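# Standalone sketch of the padding-strip step in `_decode_audio` above; the toy
# arrays and the padding convention (mask value 0 = padded frame) are assumptions
# chosen for illustration.
if __name__ == "__main__":
    values = np.array([[0.1, 0.2, 0.3, 0.0, 0.0]])  # (channels, seq_len)
    mask = np.array([1, 1, 1, 0, 0])                # 0 marks padded frames
    trimmed = values[:, mask != 0]                  # keep only the real frames
    assert trimmed.shape == (1, 3)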
| 156
|
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__SCREAMING_SNAKE_CASE :Any = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
__SCREAMING_SNAKE_CASE :Tuple = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
__SCREAMING_SNAKE_CASE :Optional[int] = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def lowercase ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def lowercase ( self : Tuple , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Optional[int]=None , snake_case_ : str=1 , snake_case_ : str="binary" , snake_case_ : int=None , snake_case_ : List[Any]="warn" , ):
_UpperCAmelCase = recall_score(
snake_case_ , snake_case_ , labels=snake_case_ , pos_label=snake_case_ , average=snake_case_ , sample_weight=snake_case_ , zero_division=snake_case_ , )
return {"recall": float(snake_case_ ) if score.size == 1 else score}
| 156
| 1
|
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolves a force of the given magnitude and direction into [x, y] components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Checks that the net moment of `forces` applied at `location` is (near) zero."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
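    # Hand-check sketch (not part of the original tests): a force of magnitude 10
    # at 90 degrees resolves to ~[0, 10]; cos(pi/2) is only zero up to floating-
    # point error, hence the tolerance.
    fx, fy = polar_force(10, 90)
    assert abs(fx) < 1e-9 and abs(fy - 10) < 1e-9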
| 149
|
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    github = Github(os.environ["GITHUB_TOKEN"])
    repo = github.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Posts a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
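    # Illustrative sketch of the inactivity arithmetic driving the branches above;
    # the timestamps are made up. An issue last touched 24 days ago and opened 40
    # days ago crosses both the 23-day and 30-day thresholds checked in main().
    from datetime import timedelta

    now = dt.utcnow()
    assert (now - (now - timedelta(days=24))).days > 23
    assert (now - (now - timedelta(days=40))).days >= 30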
| 149
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
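# Generic sketch of the deprecation-shim pattern above: the old name stays
# importable but warns before delegating to the new class. All names here are
# illustrative, not real library API.
import warnings as _warnings


class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        _warnings.warn("OldFeatureExtractor is deprecated; use NewProcessor.", FutureWarning)
        super().__init__(*args, **kwargs)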
| 183
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger()
@dataclass
class a_ :
lowercase = 42
lowercase = field(default_factory=lowerCamelCase )
lowercase = field(default_factory=lowerCamelCase )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase = len(list(m.modules() ) ) == 1 or isinstance(_SCREAMING_SNAKE_CASE , nn.Convad ) or isinstance(_SCREAMING_SNAKE_CASE , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_SCREAMING_SNAKE_CASE )
[x.remove() for x in self.handles]
return self
@property
def A__ ( self ) -> Tuple:
"""simple docstring"""
return list(filter(lambda _SCREAMING_SNAKE_CASE : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class a_ :
lowercase = 42
lowercase = 42
lowercase = 0
lowercase = field(default_factory=lowerCamelCase )
lowercase = field(default_factory=lowerCamelCase )
def __call__( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = Tracker(self.dest )(_SCREAMING_SNAKE_CASE ).parametrized
UpperCamelCase = Tracker(self.src )(_SCREAMING_SNAKE_CASE ).parametrized
UpperCamelCase = list(filter(lambda _SCREAMING_SNAKE_CASE : type(_SCREAMING_SNAKE_CASE ) not in self.src_skip , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = list(filter(lambda _SCREAMING_SNAKE_CASE : type(_SCREAMING_SNAKE_CASE ) not in self.dest_skip , _SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise Exception(
F"Numbers of operations are different. Source module has {len(_SCREAMING_SNAKE_CASE )} operations while"
F" destination module has {len(_SCREAMING_SNAKE_CASE )}." )
for dest_m, src_m in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"Transfered from={src_m} to={dest_m}" )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True )-> Optional[Any]:
print(F"Converting {name}..." )
with torch.no_grad():
UpperCamelCase = timm.create_model(__UpperCamelCase , pretrained=__UpperCamelCase ).eval()
UpperCamelCase = ResNetForImageClassification(__UpperCamelCase ).eval()
UpperCamelCase = ModuleTransfer(src=__UpperCamelCase , dest=__UpperCamelCase )
UpperCamelCase = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCamelCase )
assert torch.allclose(from_model(__UpperCamelCase ) , our_model(__UpperCamelCase ).logits ), "The model logits don't match the original one."
UpperCamelCase = F"resnet{'-'.join(name.split('resnet' ) )}"
print(__UpperCamelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=__UpperCamelCase , )
# we can use the convnext one
UpperCamelCase = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=__UpperCamelCase , )
print(F"Pushed {checkpoint_name}" )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True )-> str:
UpperCamelCase = """imagenet-1k-id2label.json"""
UpperCamelCase = 1000
UpperCamelCase = (1, num_labels)
UpperCamelCase = """huggingface/label-files"""
UpperCamelCase = num_labels
UpperCamelCase = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = partial(__UpperCamelCase , num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
UpperCamelCase = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(__UpperCamelCase , names_to_config[model_name] , __UpperCamelCase , __UpperCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
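# Example invocation (illustrative; the dump folder path is a hypothetical location):
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./converted-resnet50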
| 183
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
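# Minimal usage sketch (not part of the original module; the checkpoint name is a real
# DPR reader checkpoint, the question and passages are illustrative):
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions="What is love?",
#       titles=["Haddaway", "Song structure"],
#       texts=["'What Is Love' is a song recorded by Haddaway.", "A song is a musical composition."],
#       padding=True,
#       return_tensors="pt",
#   )
#   # `encoded_inputs["input_ids"]` has shape (n_passages, sequence_length). Feed it to a
#   # DPRReader model, then call `tokenizer.decode_best_spans(encoded_inputs, outputs)` to
#   # turn the start/end/relevance logits into `DPRSpanPrediction` tuples.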
| 95
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """
    A bunch of args sanity checks to perform even starting...
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )

    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
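# Example launch (the flags are real; the file paths are placeholders for files you prepare
# with the binarization and token-count scripts of this example):
#   python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0 --alpha_ce 0.5 \
#       --dump_path serialization_dir/my_distillation --force \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle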
| 315
| 0
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """
    ResNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet's residual layer composed by two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet's bottleneck layer. The first `1x1` convolution reduces the input by a factor of `reduction` in
    order to make the second `3x3` convolution faster. The last `1x1` convolution remaps the reduced features to
    `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
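# Minimal usage sketch (not part of the original file; `image` is any PIL image you load):
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   import torch
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])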
| 141
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
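# Minimal usage sketch (illustrative; the base checkpoint name is real, but the exact
# loading incantation for this community pipeline, including the `custom_pipeline` id and
# the component wiring, may differ from your diffusers version):
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="speech_to_image_diffusion"
#   )
#   output = pipe(audio=audio_array, sampling_rate=16_000)  # `audio_array` comes from a file you load
#   output.images[0].save("speech_to_image.png")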
| 141
| 1
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 276
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276
| 1
|
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
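# For example, partition(5) returns 7, counting the seven partitions of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.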
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 352
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118
| 0
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(
        self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2
    ):
        """
        :param conv1_get: [a,c,d], size, number, step of convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of flatten layer
        :param bp_num2: units number of hidden layer
        :param bp_num3: units number of output layer
        :param rate_w: rate of weight learning
        :param rate_t: rate of threshold learning
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
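    # Shape sketch (illustrative, not from the original file): for a 28x28 input with a
    # 5x5 kernel and conv_step=1, `convolute` returns `focus_list` of shape (576, 25)
    # (one flattened 5x5 slice per row) and `data_featuremap` as num_conv 24x24 matrices.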
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
        # expand three-dimensional data into a one-dimensional list
_A = []
for i in range(len(lowerCAmelCase_ ) ):
_A = np.shape(data[i] )
_A = data[i].reshape(1 , shapes[0] * shapes[1] )
_A = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCAmelCase_ )
_A = np.asarray(lowerCAmelCase_ )
return data_expanded
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
        # expand a matrix into a one-dimensional list
_A = np.asarray(lowerCAmelCase_ )
_A = np.shape(lowerCAmelCase_ )
_A = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
_A = []
_A = 0
for i_map in range(lowerCAmelCase_ ):
_A = np.ones((size_map, size_map) )
for i in range(0 , lowerCAmelCase_ , lowerCAmelCase_ ):
for j in range(0 , lowerCAmelCase_ , lowerCAmelCase_ ):
                _A = pd_pool[i_pool]
_A = i_pool + 1
_A = np.multiply(
lowerCAmelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowerCAmelCase_ )
return pd_all
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=bool ) -> Tuple:
        # model training
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCAmelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCAmelCase_ )) )
_A = 0
_A = []
_A = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
_A = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCAmelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
_A = np.asmatrix(datas_train[p] )
_A = np.asarray(datas_teach[p] )
_A , _A = self.convolute(
lowerCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A = self.pooling(lowerCAmelCase_ , self.size_poolinga )
_A = np.shape(lowerCAmelCase_ )
_A = self._expand(lowerCAmelCase_ )
_A = data_bp_input
_A = np.dot(lowerCAmelCase_ , self.vji.T ) - self.thre_bpa
_A = self.sig(lowerCAmelCase_ )
_A = np.dot(lowerCAmelCase_ , self.wkj.T ) - self.thre_bpa
_A = self.sig(lowerCAmelCase_ )
                # --------------Model Learning------------------------
# calculate error and gradient---------------
_A = np.multiply(
(data_teach - bp_outa) , np.multiply(lowerCAmelCase_ , (1 - bp_outa) ) )
_A = np.multiply(
np.dot(lowerCAmelCase_ , self.wkj ) , np.multiply(lowerCAmelCase_ , (1 - bp_outa) ) )
_A = np.dot(lowerCAmelCase_ , self.vji )
_A = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A = pd_conva_pooled.T.getA().tolist()
_A = self._calculate_gradient_from_pool(
lowerCAmelCase_ , lowerCAmelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_A = self._expand_mat(pd_conva_all[k_conv] )
_A = self.rate_weight * np.dot(lowerCAmelCase_ , lowerCAmelCase_ )
_A = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_A = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
_A = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A = self.thre_bpa - pd_k_all * self.rate_thre
_A = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error over all single images
_A = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A = rp + 1
_A = error_count / patterns
all_mse.append(lowerCAmelCase_ )
def draw_error():
_A = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCAmelCase_ , """+-""" )
plt.plot(lowerCAmelCase_ , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCAmelCase_ , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
# model predict
_A = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCAmelCase_ )) )
for p in range(len(lowerCAmelCase_ ) ):
_A = np.asmatrix(datas_test[p] )
_A , _A = self.convolute(
lowerCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A = self.pooling(lowerCAmelCase_ , self.size_poolinga )
_A = self._expand(lowerCAmelCase_ )
_A = data_bp_input
_A = bp_outa * self.vji.T - self.thre_bpa
_A = self.sig(lowerCAmelCase_ )
_A = bp_outa * self.wkj.T - self.thre_bpa
_A = self.sig(lowerCAmelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
_A = [list(map(self.do_round , lowerCAmelCase_ ) ) for each in produce_out]
return np.asarray(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]:
        # return the image data after the convolution process so it can be inspected
_A = np.asmatrix(lowerCAmelCase_ )
_A , _A = self.convolute(
lowerCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A = self.pooling(lowerCAmelCase_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
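# --- Illustrative sketch, not part of the class above ---
# The pooling method in the class averages (or maxes) over non-overlapping
# square windows. Below is a minimal NumPy version of the same idea, assuming
# a square feature map whose side is divisible by the pool size; every name
# in this sketch is invented for illustration.
import numpy as np

def average_pool(feature_map: np.ndarray, pool: int) -> np.ndarray:
    side = feature_map.shape[0] // pool
    pooled = np.empty((side, side))
    for i in range(side):
        for j in range(side):
            window = feature_map[i * pool : (i + 1) * pool, j * pool : (j + 1) * pool]
            pooled[i, j] = window.mean()  # swap in window.max() for max pooling
    return pooled

# average_pool(np.arange(16.0).reshape(4, 4), 2) -> [[2.5, 4.5], [10.5, 12.5]]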
| 180
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class a ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Any = '''resnet'''
lowerCamelCase :Any = ['''basic''', '''bottleneck''']
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=64 , lowerCAmelCase_=[2_56, 5_12, 10_24, 20_48] , lowerCAmelCase_=[3, 4, 6, 3] , lowerCAmelCase_="bottleneck" , lowerCAmelCase_="relu" , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Union[str, Any]:
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
_A = num_channels
_A = embedding_size
_A = hidden_sizes
_A = depths
_A = layer_type
_A = hidden_act
_A = downsample_in_first_stage
_A = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
_A , _A = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Optional[Any] = version.parse('''1.11''' )
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase ( self ) -> float:
return 1E-3
| 180
| 1
|
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCAmelCase_ ) )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if index == len(lowerCAmelCase_ ):
return True
# Recursive Step
for i in range(lowerCAmelCase_ ):
if valid_coloring(graph[index] , lowerCAmelCase_ , lowerCAmelCase_ ):
# Color current vertex
__SCREAMING_SNAKE_CASE = i
# Validate coloring
if util_color(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , index + 1 ):
return True
# Backtrack
__SCREAMING_SNAKE_CASE = -1
return False
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [-1] * len(lowerCAmelCase_ )
if util_color(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 0 ):
return colored_vertices
return []
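# Usage sketch, hedged: the three helpers above are name-mangled; in the
# original source they are `valid_coloring`, `util_color` and `color`.
# With those names, 2-coloring a 4-cycle given as an adjacency matrix:
#
#   graph = [
#       [0, 1, 0, 1],
#       [1, 0, 1, 0],
#       [0, 1, 0, 1],
#       [1, 0, 1, 0],
#   ]
#   color(graph, 2)  # -> [0, 1, 0, 1]; returns [] when no coloring exists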
| 195
|
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return " ".join(
"".join(word[::-1] ) if len(lowerCAmelCase_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 195
| 1
|
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
snake_case_ : int = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def A__ ( pkg , hint=None ):
    require_version(deps[pkg] , hint )
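# Hedged usage note: in the unobfuscated source this helper is
# `dep_version_check`; callers pass a key of `deps`, e.g.
#   dep_version_check("tqdm")  # raises if the installed tqdm violates the pin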
| 83
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = "gptsan-japanese"
_UpperCAmelCase = [
"past_key_values",
]
_UpperCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self: Optional[Any] , UpperCamelCase: List[str]=3_60_00 , UpperCamelCase: List[str]=12_80 , UpperCamelCase: List[Any]=10_24 , UpperCamelCase: Any=81_92 , UpperCamelCase: Dict=40_96 , UpperCamelCase: Optional[int]=1_28 , UpperCamelCase: Any=10 , UpperCamelCase: List[Any]=0 , UpperCamelCase: Dict=16 , UpperCamelCase: Tuple=16 , UpperCamelCase: Union[str, Any]=1_28 , UpperCamelCase: List[Any]=0.0 , UpperCamelCase: Union[str, Any]=1e-5 , UpperCamelCase: int=False , UpperCamelCase: Optional[int]=0.0 , UpperCamelCase: Dict="float32" , UpperCamelCase: Any=False , UpperCamelCase: Dict=False , UpperCamelCase: List[str]=False , UpperCamelCase: Union[str, Any]=0.002 , UpperCamelCase: int=False , UpperCamelCase: str=True , UpperCamelCase: Dict=3_59_98 , UpperCamelCase: Optional[Any]=3_59_95 , UpperCamelCase: Optional[Any]=3_59_99 , **UpperCamelCase: Optional[int] , ) -> Optional[int]:
snake_case__ = vocab_size
snake_case__ = max_position_embeddings
snake_case__ = d_model
snake_case__ = d_ff
snake_case__ = d_ext
snake_case__ = d_spout
snake_case__ = num_switch_layers
snake_case__ = num_ext_layers
snake_case__ = num_switch_layers + num_ext_layers
snake_case__ = num_heads
snake_case__ = num_experts
snake_case__ = expert_capacity
snake_case__ = dropout_rate
snake_case__ = layer_norm_epsilon
snake_case__ = router_bias
snake_case__ = router_jitter_noise
snake_case__ = router_dtype
snake_case__ = router_ignore_padding_tokens
snake_case__ = output_hidden_states
snake_case__ = output_attentions
snake_case__ = initializer_factor
snake_case__ = output_router_logits
snake_case__ = use_cache
super().__init__(
separator_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , )
| 307
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Any = logging.get_logger(__name__)
__a :List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class _a ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_lowerCamelCase : Any = 'mvp'
_lowerCamelCase : Optional[int] = ['past_key_values']
_lowerCamelCase : Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[Any] , UpperCAmelCase : Optional[int]=50267 , UpperCAmelCase : int=1024 , UpperCAmelCase : Optional[int]=12 , UpperCAmelCase : Any=4096 , UpperCAmelCase : Tuple=16 , UpperCAmelCase : Optional[Any]=12 , UpperCAmelCase : Union[str, Any]=4096 , UpperCAmelCase : List[str]=16 , UpperCAmelCase : Union[str, Any]=0.0 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Optional[Any]="gelu" , UpperCAmelCase : Any=1024 , UpperCAmelCase : int=0.1 , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : Dict=0.02 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : List[str]=False , UpperCAmelCase : Dict=True , UpperCAmelCase : Any=1 , UpperCAmelCase : Tuple=0 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int=2 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : List[str]=False , UpperCAmelCase : Dict=100 , UpperCAmelCase : Tuple=800 , **UpperCAmelCase : Any , ):
A_ = vocab_size
A_ = max_position_embeddings
A_ = d_model
A_ = encoder_ffn_dim
A_ = encoder_layers
A_ = encoder_attention_heads
A_ = decoder_ffn_dim
A_ = decoder_layers
A_ = decoder_attention_heads
A_ = dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = activation_function
A_ = init_std
A_ = encoder_layerdrop
A_ = decoder_layerdrop
A_ = classifier_dropout
A_ = use_cache
A_ = encoder_layers
A_ = scale_embedding # scale factor will be sqrt(d_model) if True
A_ = use_prompt
A_ = prompt_length
A_ = prompt_mid_dim
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , snake_case__ ):
A_ = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"The config can simply be saved and uploaded again to be fixed." )
| 353
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
if not keep_fpaa_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
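# Two usage sketches for the helpers above (their names are mangled; in the
# original accelerate source they are `merge_dicts` and `is_port_in_use`):
#
#   merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}})
#   # -> {"a": {"y": 2, "x": 1}}; nested dicts are merged into `destination`
#
#   is_port_in_use()      # checks the default torch.distributed port 29500
#   is_port_in_use(8080)  # True if something is already listening there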
| 329
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__lowerCAmelCase : Any =logging.get_logger(__name__)
__lowerCAmelCase : str ={
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''imagegpt'''
SCREAMING_SNAKE_CASE__ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Any=512 + 1 , lowerCAmelCase__ :int=32 * 32 , lowerCAmelCase__ :Optional[Any]=512 , lowerCAmelCase__ :Union[str, Any]=24 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Dict="quick_gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Any=0.02 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :int=False , lowerCAmelCase__ :Tuple=False , **lowerCAmelCase__ :Union[str, Any] , ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Any = n_positions
__SCREAMING_SNAKE_CASE : List[str] = n_embd
__SCREAMING_SNAKE_CASE : int = n_layer
__SCREAMING_SNAKE_CASE : Any = n_head
__SCREAMING_SNAKE_CASE : Any = n_inner
__SCREAMING_SNAKE_CASE : Optional[int] = activation_function
__SCREAMING_SNAKE_CASE : Dict = resid_pdrop
__SCREAMING_SNAKE_CASE : int = embd_pdrop
__SCREAMING_SNAKE_CASE : Any = attn_pdrop
__SCREAMING_SNAKE_CASE : str = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : List[str] = scale_attn_weights
__SCREAMING_SNAKE_CASE : str = use_cache
__SCREAMING_SNAKE_CASE : int = scale_attn_by_inverse_layer_idx
__SCREAMING_SNAKE_CASE : List[str] = reorder_and_upcast_attn
__SCREAMING_SNAKE_CASE : Dict = tie_word_embeddings
super().__init__(tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__ )
class _lowercase ( A__ ):
'''simple docstring'''
@property
def __magic_name__( self :Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :"FeatureExtractionMixin" , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional["TensorType"] = None , lowerCAmelCase__ :int = 3 , lowerCAmelCase__ :int = 32 , lowerCAmelCase__ :int = 32 , ) -> Mapping[str, Any]:
__SCREAMING_SNAKE_CASE : List[str] = self._generate_dummy_images(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ ) )
return inputs
| 9
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def __magic_name__ ( lowercase ):
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[str] =ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =parser.add_subparsers(help="""datasets-cli command helpers""" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(lowercase )
EnvironmentCommand.register_subcommand(lowercase )
TestCommand.register_subcommand(lowercase )
RunBeamCommand.register_subcommand(lowercase )
DummyDataCommand.register_subcommand(lowercase )
# Parse args
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_known_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
SCREAMING_SNAKE_CASE_: Dict =parse_unknown_args(lowercase )
# Run
SCREAMING_SNAKE_CASE_: Tuple =args.func(lowercase , **lowercase )
service.run()
if __name__ == "__main__":
main()
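# The pair-zipping helper above (called `parse_unknown_args` at its call
# site in main) turns leftover CLI flags into kwargs:
#   parse_unknown_args(["--name", "squad", "--split", "train"])
#   # -> {"name": "squad", "split": "train"}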
| 173
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :Any = BarthezTokenizer
_UpperCAmelCase :Dict = BarthezTokenizerFast
_UpperCAmelCase :List[str] = True
_UpperCAmelCase :Dict = True
def __UpperCamelCase( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase : Tuple = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=A_ )
UpperCamelCase : Any = tokenizer
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = "<pad>"
UpperCamelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(A_ ) , 10_1122 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCamelCase : str = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase : List[str] = self.tokenizer(
A_ , max_length=len(A_ ) , padding=A_ , truncation=A_ , return_tensors="pt" )
self.assertIsInstance(A_ , A_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(A_ , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCamelCase : Any = self.get_tokenizer()
UpperCamelCase : int = self.get_rust_tokenizer()
UpperCamelCase : Optional[Any] = "I was born in 92000, and this is falsé."
UpperCamelCase : Optional[Any] = tokenizer.tokenize(A_ )
UpperCamelCase : List[Any] = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase : Union[str, Any] = tokenizer.encode(A_ , add_special_tokens=A_ )
UpperCamelCase : Dict = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase : List[str] = self.get_rust_tokenizer()
UpperCamelCase : Tuple = tokenizer.encode(A_ )
UpperCamelCase : int = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = {"input_ids": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
UpperCamelCase : Tuple = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=A_ , )
| 140
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self , A_ , A_=13 , A_=3 , A_=224 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCamelCase : Any = size if size is not None else {"height": 18, "width": 18}
UpperCamelCase : Tuple = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Tuple = num_channels
UpperCamelCase : str = image_size
UpperCamelCase : Optional[int] = min_resolution
UpperCamelCase : List[Any] = max_resolution
UpperCamelCase : Union[str, Any] = do_resize
UpperCamelCase : str = size
UpperCamelCase : List[str] = do_normalize
UpperCamelCase : Any = image_mean
UpperCamelCase : int = image_std
def __UpperCamelCase( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :Tuple = ViTImageProcessor if is_vision_available() else None
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = EfficientFormerImageProcessorTester(self )
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , "image_mean" ) )
self.assertTrue(hasattr(A_ , "image_std" ) )
self.assertTrue(hasattr(A_ , "do_normalize" ) )
self.assertTrue(hasattr(A_ , "do_resize" ) )
self.assertTrue(hasattr(A_ , "size" ) )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCamelCase : Tuple = image_processor(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase : Dict = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCamelCase : Optional[Any] = image_processor(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase : Any = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCamelCase : Any = image_processor(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 140
| 1
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : int ):
"""simple docstring"""
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_a = []
_a = 11
_a = int('''1''' + '''0''' * digit_len )
for num in range(_lowerCAmelCase, _lowerCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCAmelCase, _lowerCAmelCase ):
solutions.append(f'{num}/{den}' )
den += 1
num += 1
_a = 10
return solutions
def A_ ( _lowerCAmelCase : int = 2 ):
"""simple docstring"""
_a = 1.0
for fraction in fraction_list(_lowerCAmelCase ):
_a = Fraction(_lowerCAmelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCAmelCase )
if __name__ == "__main__":
print(solution())
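# Sanity check of the digit-cancelling rule implemented above (the function
# names are mangled; in the original they are `is_digit_cancelling`,
# `fraction_list` and `solution`): 49/98 "cancels" the shared 9 to 4/8 and
# both equal 0.5, so it qualifies. The four such non-trivial fractions are
# 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so
# solution() returns 100 (Project Euler problem 33).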
| 320
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[str] = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
_a = size if size is not None else {'''shortest_edge''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_a = image_std if image_std is not None else OPENAI_CLIP_STD
_a = do_convert_rgb
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_a = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a = [convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_a = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
_a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
_a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
_a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
_a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
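# Hedged usage sketch: the method above is the CLIP-style preprocessing
# chain (RGB convert -> shortest-edge 224 resize -> 224x224 center crop ->
# 1/255 rescale -> CLIP mean/std normalize). With an instance `processor`
# of the class above and a PIL image `img`:
#   batch = processor(images=img, return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224])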
| 320
| 1
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = (KDPMaDiscreteScheduler,)
UpperCAmelCase__ = 10
def A_ ( self : str , **UpperCAmelCase : Optional[int] ) -> str:
lowerCamelCase__ : Optional[int] = {
'num_train_timesteps': 1100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**a_ )
return config
def A_ ( self : Optional[Any] ) -> Union[str, Any]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a_ )
def A_ ( self : Optional[Any] ) -> Dict:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a_ , beta_end=a_ )
def A_ ( self : Optional[int] ) -> Dict:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a_ )
def A_ ( self : List[str] ) -> Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def A_ ( self : Any ) -> Dict:
lowerCamelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase__ : List[Any] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCamelCase__ : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase__ : Union[str, Any] = self.dummy_model()
lowerCamelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase__ : Optional[int] = sample.to(a_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ : str = scheduler.scale_model_input(a_ , a_ )
lowerCamelCase__ : int = model(a_ , a_ )
lowerCamelCase__ : List[Any] = scheduler.step(a_ , a_ , a_ )
lowerCamelCase__ : Dict = output.prev_sample
lowerCamelCase__ : Optional[Any] = torch.sum(torch.abs(a_ ) )
lowerCamelCase__ : Union[str, Any] = torch.mean(torch.abs(a_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
if torch_device == "mps":
return
lowerCamelCase__ : List[Any] = self.scheduler_classes[0]
lowerCamelCase__ : str = self.get_scheduler_config()
lowerCamelCase__ : List[Any] = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase__ : str = self.dummy_model()
lowerCamelCase__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase__ : Tuple = sample.to(a_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase__ : Tuple = scheduler.scale_model_input(a_ , a_ )
lowerCamelCase__ : int = model(a_ , a_ )
lowerCamelCase__ : Optional[int] = scheduler.step(a_ , a_ , a_ )
lowerCamelCase__ : Union[str, Any] = output.prev_sample
lowerCamelCase__ : Union[str, Any] = torch.sum(torch.abs(a_ ) )
lowerCamelCase__ : Union[str, Any] = torch.mean(torch.abs(a_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def A_ ( self : Any ) -> str:
if torch_device == "mps":
return
lowerCamelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCamelCase__ : List[str] = self.get_scheduler_config()
lowerCamelCase__ : Tuple = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps , device=a_ )
lowerCamelCase__ : Optional[int] = self.dummy_model()
lowerCamelCase__ : Dict = self.dummy_sample_deter.to(a_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase__ : List[Any] = scheduler.scale_model_input(a_ , a_ )
lowerCamelCase__ : Optional[Any] = model(a_ , a_ )
lowerCamelCase__ : Optional[Any] = scheduler.step(a_ , a_ , a_ )
lowerCamelCase__ : Optional[int] = output.prev_sample
lowerCamelCase__ : Optional[int] = torch.sum(torch.abs(a_ ) )
lowerCamelCase__ : Union[str, Any] = torch.mean(torch.abs(a_ ) )
if str(a_ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
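# The loop exercised by each test above is the standard diffusers sampling
# idiom, independent of the concrete scheduler:
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample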
| 359
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : List[Any] = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ):
def _A ( self : Union[str, Any] ):
UpperCamelCase :Union[str, Any] = load_tool("""text-classification""" )
self.tool.setup()
UpperCamelCase :str = load_tool("""text-classification""" , remote=__lowerCamelCase )
def _A ( self : Optional[Any] ):
UpperCamelCase :Optional[int] = self.tool("""That's quite cool""" , ["""positive""", """negative"""] )
self.assertEqual(__lowerCamelCase , """positive""" )
def _A ( self : List[str] ):
UpperCamelCase :int = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] )
self.assertEqual(__lowerCamelCase , """positive""" )
def _A ( self : int ):
UpperCamelCase :str = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
self.assertEqual(__lowerCamelCase , """positive""" )
def _A ( self : Dict ):
UpperCamelCase :Optional[int] = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
self.assertEqual(__lowerCamelCase , """positive""" )
| 38
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = """xlm-roberta"""
def __init__( self : Any , lowerCAmelCase : Tuple=3_0522 , lowerCAmelCase : Tuple=768 , lowerCAmelCase : Any=12 , lowerCAmelCase : str=12 , lowerCAmelCase : Any=3072 , lowerCAmelCase : int="gelu" , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : int=1E-12 , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Any=2 , lowerCAmelCase : int="absolute" , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Dict=None , **lowerCAmelCase : Any , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase)
_snake_case : List[Any] = vocab_size
_snake_case : Optional[Any] = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : List[Any] = hidden_act
_snake_case : Tuple = intermediate_size
_snake_case : Any = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : List[Any] = max_position_embeddings
_snake_case : List[str] = type_vocab_size
_snake_case : Optional[int] = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Optional[Any] = position_embedding_type
_snake_case : Tuple = use_cache
_snake_case : Optional[Any] = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_snake_case : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_snake_case : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
| 317
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase_ : int = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
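# Usage sketch for the tool above (hypothetical: it assumes the transformers
# agents runtime is installed and the NLLB checkpoint can be downloaded;
# `__call__` is provided by the PipelineTool base class):
#
#   translator = TranslationTool()
#   print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))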
| 318
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
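# Usage sketch (hypothetical: assumes the transformers package and network
# access to the microsoft/speecht5_tts checkpoint):
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello, world!", return_tensors="pt")
#   print(inputs["input_ids"].shape)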
| 318
| 1
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
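# Round-trip sketch for the helpers above (hypothetical toy size -- real keys
# should stay at 2048 bits or more; this only illustrates the returned tuples):
#
#   public_key, private_key = generate_key(64)
#   key_size, e_1, e_2, p = public_key
#   _, d = private_key  # the decryption exponent, kept secret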
| 4
|
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
UpperCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
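# Example invocation (hypothetical paths -- the script only needs the original
# LDM checkpoint, its OmegaConf YAML, and an output directory):
#
#   python convert_ldm_original.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_path ./ldm/config.yaml \
#       --output_path ./ldm-pipeline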
| 141
| 0
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 355
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


# Converts bytes into megabytes
def b2mb(x):
    return int(x / 2**20)


# This context manager tracks the peak GPU memory usage of the enclosed block
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator,
    batch_size=16,
    model_name="bert-base-cased",
    n_train=320,
    n_val=160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
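# Example launch (hypothetical: any accelerate launcher works, e.g. plain
# python for a single GPU, or `accelerate launch` with a DeepSpeed config):
#
#   accelerate launch peak_memory_script.py \
#       --model_name_or_path bert-base-cased \
#       --num_epochs 1 \
#       --output_dir ./outputs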
| 143
| 0
|
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 23
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
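# Usage sketch: the config can be instantiated standalone, overriding a video
# related field while keeping the remaining (illustrative) defaults:
if __name__ == "__main__":
    config = VivitConfig(num_frames=16, hidden_dropout_prob=0.1)
    print(config.model_type, config.num_frames, config.tubelet_size)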
| 191
| 0
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
__snake_case ="""3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 55
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 55
| 1
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
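# Usage sketch: a library entry point would call this at import time to fail
# fast on an incompatible install, e.g.
#
#   dep_version_check("numpy")
#
# which raises if the installed numpy violates the pin in the deps table.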
| 68
|
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when this task is not assigned to anyone in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
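    # A second, hand-checkable case: person 0 can only do task 1 and person 1
    # can do tasks 1 or 2, so the only complete assignment is 0->1, 1->2 (1 way).
    assert AssignmentUsingBitmask([[1], [1, 2]], 2).count_no_of_ways([[1], [1, 2]]) == 1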
| 194
| 0
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
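# Usage sketch (run from the root of a transformers checkout; the test file
# path is illustrative):
#
#   test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   print(to_json(get_model_to_tester_mapping(test_file)))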
| 370
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
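# Example invocation (hypothetical paths; the base model and config must
# describe the same wav2vec 2.0 architecture the S3PRL head was trained on):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted-model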
| 268
| 0
|
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")


@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")


class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 105
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 105
| 1
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase_ (unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = inspect.getfile(accelerate.test_utils )
UpperCAmelCase_ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase_ : Tuple = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase_ : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
print(f"""Found {torch.cuda.device_count()} devices.""" )
UpperCAmelCase_ : str = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
print(f"""Found {torch.cuda.device_count()} devices.""" )
UpperCAmelCase_ : Optional[Any] = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(f"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
UpperCAmelCase_ : Any = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
UpperCAmelCase_ : List[str] = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCamelCase_ = Accelerator()
lowerCamelCase_ = (accelerator.state.process_index + 2, 10)
lowerCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
lowerCamelCase_ = ''''''
lowerCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowerCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowerCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
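The `__main__` block above only makes sense when one copy of the script runs per device, so `pad_across_processes` has ranks to pad against. A minimal launch sketch, assuming the file is saved as `test_multigpu.py` on a machine with at least two GPUs:

# Hypothetical launch (file name assumed, needs >= 2 GPUs):
#   torchrun --nproc_per_node=2 test_multigpu.py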
"""simple docstring"""
def snake_case ( A__ ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(A__ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
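A short usage sketch for the helper above; the outputs follow directly from the list comprehension, which upper-cases exactly one alphabetic character per variant:

print(capitalize_variants("abc"))  # ['Abc', 'aBc', 'abC']
print(capitalize_variants("a1b"))  # ['A1b', 'a1B'] (the digit is skipped)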
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
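A quick numeric check of both formulas: in parallel, 1 / (1/3 + 1/6) = 2, and in series the values simply add:

print(resistor_parallel([3.0, 6.0]))  # 2.0
print(resistor_series([3.0, 6.0]))    # 9.0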
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
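Assuming a fairseq mBART checkpoint on disk, the script above would typically be driven like this; the script file name and the paths are placeholders, not something the source fixes:

# Hypothetical invocation:
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned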
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = 3
_lowerCamelCase = 2_5_0
_lowerCamelCase = ids_tensor((batch_size, length) , _a )
_lowerCamelCase = torch.ones((batch_size, length) , device=_a , dtype=torch.float ) / length
return input_ids, scores
def snake_case__ ( self ):
_lowerCamelCase = self._get_tensors(5 )
_lowerCamelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_a , _a ) )
_lowerCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(_a , _a ) )
_lowerCamelCase = self._get_tensors(1_0 )
self.assertTrue(criteria(_a , _a ) )
def snake_case__ ( self ):
_lowerCamelCase = MaxLengthCriteria(max_length=1_0 )
_lowerCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(_a , _a ) )
_lowerCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(_a , _a ) )
_lowerCamelCase = self._get_tensors(1_0 )
self.assertTrue(criteria(_a , _a ) )
def snake_case__ ( self ):
_lowerCamelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
_lowerCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(_a , _a ) )
_lowerCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(_a , _a ) )
_lowerCamelCase = self._get_tensors(1_0 )
self.assertTrue(criteria(_a , _a ) )
_lowerCamelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 1_0 )
def snake_case__ ( self ):
_lowerCamelCase = self._get_tensors(5 )
_lowerCamelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_a , _a ) )
_lowerCamelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_a , _a ) )
def snake_case__ ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
with self.assertWarns(_a ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
_lowerCamelCase = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
self.assertEqual(len(_a ) , 1 )
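Outside the test suite, the same criteria plug directly into generation via the `stopping_criteria` argument of `generate`. A minimal sketch, assuming `model` and `input_ids` already exist:

criteria = StoppingCriteriaList(
    [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=0.5)]
)
# Stop at 20 tokens or 0.5 s of wall-clock time, whichever comes first.
# output_ids = model.generate(input_ids, stopping_criteria=criteria)  # model/input_ids assumed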
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
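The effect of the `_LazyModule` indirection is that the tokenizer submodules are only imported when an attribute is first accessed, so `import transformers` stays cheap even if sentencepiece is installed. A sketch of the downstream behavior; the model identifier is an assumption, not something this file pins down:

# from transformers import NllbTokenizer  # resolved lazily on first access
# tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")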