#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate GPU memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes with 4 GPUs per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1    # crucial - only 1 task per node!
# #SBATCH --cpus-per-task=10     # number of cores per task
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*args):
    """Prints under a file lock so that output from concurrent processes doesn't interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*args)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
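# On a healthy setup each rank reports itself as OK, roughly as follows (the
# hostnames, world size and versions below are made-up examples derived from
# the printflock format strings above; rank 0 additionally prints versions):
#
#   [node1-0] is OK (global rank: 0/2)
#   [node1-1] is OK (global rank: 1/2)
#   pt=1.13.1, cuda=11.7, nccl=(2, 14, 3)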
"""Implementation of the Gaussian (normal) distribution density function."""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
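# A quick sanity check of the reconstructed function above (values rounded;
# gaussian(0) is the peak of the standard normal, 1/sqrt(2*pi)):
#
#     >>> round(gaussian(0), 4)
#     0.3989
#     >>> gaussian(1) == gaussian(-1)  # symmetry around mu
#     True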
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of products, one for each unique partition of
    number_to_partition into prime parts.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer whose number of prime partitions exceeds number_unique_partitions."""
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"{solution() = }")
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000
        )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
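# Note on the sld_* keyword arguments exercised above: these are the Safe
# Latent Diffusion knobs of StableDiffusionPipelineSafe. sld_guidance_scale=0
# disables safety guidance entirely (the baseline runs), while a large value
# such as 2000 steers the generation away from unsafe concepts once
# sld_warmup_steps denoising steps have passed.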
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of input_list sorted in ascending order, or [] for an empty list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
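# Illustrative behaviour of mode() as reconstructed above:
#
#     >>> mode([2, 2, 3])
#     [2]
#     >>> mode([1, 2])   # ties return every mode, sorted
#     [1, 2]
#     >>> mode([])
#     []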
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network; should succeed
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network; should succeed
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network; should succeed
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
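# The behaviour under test, reduced to a shell one-liner (a hedged sketch; the
# checkpoint must already be in the local cache for this to succeed):
#
#   TRANSFORMERS_OFFLINE=1 python -c \
#       "from transformers import BertModel; BertModel.from_pretrained('hf-internal-testing/tiny-random-bert')"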
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
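# Minimal usage sketch (assumes a transformers version that ships the MRA
# model; MraModel is the plain encoder that consumes this config):
#
#     from transformers import MraModel
#     config = MraConfig()   # the defaults above
#     model = MraModel(config)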
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
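# Illustrative behaviour of the config above (a sketch, not part of the
# original file): with backbone_config=None a default ResNet backbone config
# is created, and the attribute_map aliases resolve through the properties:
#
#     config = DetaConfig()
#     config.backbone_config.model_type  # "resnet"
#     config.num_attention_heads         # 8   (encoder_attention_heads)
#     config.hidden_size                 # 256 (d_model)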
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]]) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated amount of each resource over all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: the claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process need: maximum claim minus currently allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need vector."""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm on the given tables."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break
    def __pretty_data(self):
        """Properly align the display of the algorithm's data tables."""
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
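# Example run (uses the test tables defined at the top of this file; any
# truthy keyword such as describe=True makes main() print the pretty tables
# before simulating the safety check):
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)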
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ['on_step_begin', 'on_step_end']
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ['on_log', 'on_train_end']
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='steps'
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback, MyTestTrainerCallback])
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                'input_ids': multiple_choice_inputs_ids,
                'attention_mask': multiple_choice_input_mask,
                'token_type_ids': multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        """Runs the pretrained checkpoint on a fixed input and checks a slice of the logits."""
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
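
def _example_pretraining_forward():
    # Hedged usage sketch, not part of the original test file: the same forward pass the
    # integration test performs, returning the prediction-logits shape (1, 6, 30522).
    model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
    output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    return output.shape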
| 351
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies polynomial warmup before handing control to `decay_schedule_fn`."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None):
    """Builds a warmup + polynomial-decay LR schedule and a matching Adam/AdamWeightDecay optimizer."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
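
def _example_create_optimizer_usage():
    # Hedged usage sketch, not part of the original module; the step counts are illustrative.
    optimizer, lr_schedule = create_optimizer(init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01)
    # During the first 1_000 steps the LR ramps up linearly, then decays polynomially to 0.
    return optimizer, lr_schedule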
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay (the class name is recovered from the call in `create_optimizer`)."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config, registering WarmUp as a custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking)
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate (and per-device coefficients) for the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several steps so they can be applied once per effective batch."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        return self._accum_steps.value()
@property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)
    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
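
def _example_gradient_accumulation():
    # Hedged usage sketch, not part of the original module: accumulate two fake gradient
    # steps, read back the element-wise sums, then reset the accumulator.
    accumulator = GradientAccumulator()
    for _ in range(2):
        accumulator([tf.ones((2, 2))])
    summed = accumulator.gradients  # each entry is the sum over the two calls
    accumulator.reset()
    return summed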
| 56
| 0
|
from ..utils import DummyObject, requires_backends
class __A(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> int:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> int:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __A(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *_snake_case , **_snake_case ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
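
def _example_dummy_object_error():
    # Hedged sketch, not part of the original module: a dummy class above refuses to
    # instantiate until the `flax` backend is installed, via `requires_backends`.
    try:
        __A()
        return None
    except Exception as error:  # demo only; requires_backends raises an ImportError
        return str(error)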
| 6
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sorts a[start:end + 1] in place and returns the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """Lomuto-style partition around a random pivot; returns (pivot position, comparison count)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
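
def _example_sort_check():
    # Hedged sanity check, not part of the original script: sort a small list in place and
    # return the comparison count (O(n log n) on average for this randomized quicksort).
    data = [3, 1, 2, 5, 4]
    comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
    assert data == [1, 2, 3, 4, 5]
    return comparisons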
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
| 92
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Builds small ViT configs and inputs for the Flax tests below (name recovered from `setUp`)."""

    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)  # is_decoder=False restored by assumption; the mutated source obscured the literal
        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
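
def _example_flax_vit_forward():
    # Hedged usage sketch, not part of the original tests: one forward pass through a
    # randomly initialised FlaxViTModel built from the tester's small default config.
    tester = FlaxViTModelTester(parent=None)
    config, pixel_values = tester.prepare_config_and_inputs()
    model = FlaxViTModel(config=config)
    return model(pixel_values).last_hidden_state.shape  # (13, 226, 32)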
| 166
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning)
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenizes text queries (padding each sample to the batch-wide maximum number of queries) and/or preprocesses images."""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        """Forwards all arguments to OwlViTImageProcessor.post_process; see its docstring."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode; see its docstring."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning)
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning)
        return self.image_processor
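
def _example_query_padding():
    # Hedged sketch, not part of the original module: the same " "-padding rule __call__
    # applies when nested text queries have uneven lengths.
    batch = [["a cat"], ["a remote control", "a dog"]]
    max_num_queries = max(len(t) for t in batch)
    return [t + [" "] * (max_num_queries - len(t)) for t in batch]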
| 166
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : List[Any] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!')
    def _rope_scaling_validation(self):
        """Validates `rope_scaling`: expects a 2-field dict with `type` in {linear, dynamic} and a float `factor` > 1."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""")
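
def _example_gpt_neox_config():
    # Hedged usage sketch, not part of the original module: a deliberately tiny config with
    # linear RoPE scaling; the rope_scaling dict is validated during __init__.
    return GPTNeoXConfig(hidden_size=256, num_attention_heads=8, num_hidden_layers=2, rope_scaling={"type": "linear", "factor": 2.0})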
| 143
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning)
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenizes text queries (padding each sample to the batch-wide maximum number of queries) and/or preprocesses images."""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        """Forwards all arguments to OwlViTImageProcessor.post_process; see its docstring."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode; see its docstring."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning)
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning)
        return self.image_processor
| 167
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148
|
import os
def solution():
    """Returns the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 148
| 1
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False  # assumed flag; mirrors the usual diffusers ONNX test setup
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 103
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        # NOTE: the three prepared tensors are passed positionally in the order
        # prepare_inputs returned them (assumed; the mutated source lost the names).
        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True)

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 103
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """A BARThez sequence has the format `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
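
def _example_pair_layout(cls_id=0, sep_id=2):
    # Hedged sketch, not part of the original module: the id layout
    # build_inputs_with_special_tokens produces for a sequence pair.
    token_ids_0, token_ids_1 = [10, 11], [20]
    return [cls_id] + token_ids_0 + [sep_id] + [sep_id] + token_ids_1 + [sep_id]  # <s> A </s></s> B </s>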
| 197
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
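# Added note (a sketch of the intended behavior, not part of the original file):
# with the lazy module installed above, `from transformers import
# RobertaPreLayerNormModel` defers the heavy torch import until the attribute
# is first accessed, instead of paying the cost at package import time.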
| 197
| 1
|
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
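def test_gamma_recurrence() -> None:
    # Added sketch: for positive integers the recurrence above reduces to
    # (n - 1)!, and for half-integers it bottoms out at gamma(0.5) == sqrt(pi).
    assert gamma(4) == 6.0
    assert gamma(1.5) == 0.5 * sqrt(pi)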
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 25
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
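# Illustrative usage sketch (added; assumes Hub access to a real Bloom checkpoint):
#
#     tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tok("Hello world")["input_ids"]
#     assert tok.decode(ids) == "Hello world"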
| 306
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
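# Special-token layout sketch (added, derived from the two methods above):
#     single sequence:   <s> A </s>
#     pair of sequences: <s> A </s> </s> B </s>
# create_token_type_ids_from_sequences returns all zeros in both cases, since
# BARThez, like RoBERTa, does not use token type ids.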
| 145
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n    Output class for the scheduler\'s step function output.\n\n    Args:\n        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n            denoising loop.\n        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n            The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n            `pred_original_sample` can be used to preview progress or for guidance.\n    \"""\n\n    prev_sample: torch.FloatTensor\n    pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    """Checks that `# Copied from` blocks stay in sync with their source."""
    def setUp(self) -> None:
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, 'schedulers/'))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/diffusers/schedulers/scheduling_ddpm.py'),
            os.path.join(self.diffusers_dir, 'schedulers/scheduling_ddpm.py'),
        )

    def tearDown(self) -> None:
        check_copies.DIFFUSERS_PATH = 'src/diffusers'
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None) -> None:
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self) -> None:
        code = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self) -> None:
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE + '\n',
        )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput', 'DDPMSchedulerOutput', REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', re.sub('DDPM', 'Test', REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub('DDPM', long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test', 'TestSchedulerOutput', REFERENCE_CODE, overwrite_result=re.sub('DDPM', 'Test', REFERENCE_CODE),
        )
| 145
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor, as nested Python lists, of the given shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
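# Example (added sketch): floats_list((2, 3)) yields a 2x3 nested list with
# entries drawn uniformly from [0, scale); e.g. len(floats_list((2, 3))[0]) == 3.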
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
| 88
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
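# Minimal sketch (added): the attribute_map above lets generic code read the
# canonical names even though Pegasus stores them under model-specific ones:
#
#     config = PegasusConfig(d_model=512, encoder_attention_heads=8)
#     assert config.hidden_size == 512 and config.num_attention_heads == 8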
| 88
| 1
|
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))
    import doctest

    doctest.testmod()
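    # Added sketch: any bracket with a sign change works the same way; e.g.
    # x**2 - 2 crosses zero at sqrt(2) on [1, 1000]:
    print(bisection(lambda x: x**2 - 2, 1, 1000))  # ~1.4142135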
| 108
|
"""simple docstring"""
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
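# Worked example (added): get_device_map(6, [0, 1]) == {0: [0, 1, 2], 1: [3, 4, 5]},
# and assert_device_map accepts that mapping for a model with 6 attention blocks.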
| 108
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
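# Hypothetical usage sketch (added; assumes the `datasets` feature types imported above):
#
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     task = TextClassification(text_column="text", label_column="labels")
#     task = task.align_with_features(features)  # label_schema now carries the class names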
| 51
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, 'dpr_tokenizer')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        bart_tokenizer_path = os.path.join(self.tmpdirname, 'bart_tokenizer')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'bart_tokenizer'))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, 'rag_tokenizer')
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq')
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-sequence-nq')
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 212
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, generator=None, num_inference_steps=50, output_type="pil", return_dict=True, **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
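# Hypothetical usage sketch (added; the checkpoint name is illustrative):
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]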
| 370
|
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    """Plots the fitted quartic against the raw data."""
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
    plt.title('Truth or Bluff (Linear Regression)')
    plt.xlabel('Position level')
    plt.ylabel('Salary')
    plt.show()
if __name__ == "__main__":
    viz_polynomial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
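# Added note: with degree=4 the design matrix holds [1, x, x**2, x**3, x**4],
# so the "linear" model actually fits a quartic; the predict call above simply
# evaluates that quartic at position level 5.5.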
| 332
| 0
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<unk>""")
        self.assertEqual(vocab_keys[1], """<s>""")
        self.assertEqual(vocab_keys[-1], """j""")
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="""max_length""")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="""max_length""")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="""max_length""",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="""max_length""")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="""max_length""")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="""max_length""",
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = """Hello World!"""
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = """ """.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="""pt""")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="""pt""")
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["""input_ids"""].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            """This is a very simple sentence.""",
            """The quick brown fox jumps over the lazy dog.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="""google/reformer-crime-and-punishment""", revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""", padding=False, sequences=sequences,
        )
| 242
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
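# Added sketch: each recursive pass bubbles the current maximum into place, so
# e.g. bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5] after at most n passes.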
| 242
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": CTRLModel,
            """text-classification""": CTRLForSequenceClassification,
            """text-generation""": CTRLLMHeadModel,
            """zero-shot""": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("""ctrl""")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 371
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
class UpperCAmelCase_ :
'''simple docstring'''
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating the list revisits a node, i.e. the list contains a loop."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
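
# Editor's sketch (hedged, not part of the original module): Floyd's
# tortoise-and-hare algorithm detects the same loops in O(1) extra space,
# while the iterator above keeps an O(n) `visited` list. The helper name
# `has_loop_floyd` is hypothetical and introduced here for illustration only.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:  # the two pointers can only meet inside a cycle
            return True
    return False
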
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # spherical linear interpolation between v0 and v1 at fraction t;
    # falls back to plain lerp when the vectors are nearly collinear
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    else:
        inputs_are_torch = False

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
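
# Quick sanity note on slerp above (editor's addition, not from the original
# file): interpolating halfway between orthogonal unit vectors stays on the
# unit sphere, while plain linear interpolation would shrink the norm:
#     v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
#     np.linalg.norm(slerp(0.5, v0, v1))  # ~1.0; lerp would give ~0.707
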
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model,
            coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def _lowerCAmelCase( self , __lowerCAmelCase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[Any]:
self.enable_attention_slicing(__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Optional[int]:
set_requires_grad(self.vae , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[Any]:
set_requires_grad(self.vae , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> Any:
set_requires_grad(self.unet , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> Union[str, Any]:
set_requires_grad(self.unet , __lowerCAmelCase )
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(self, style_image, content_image, style_prompt=None, content_prompt=None, height=512, width=512, noise_strength=0.6, num_inference_steps=50, guidance_scale=7.5, batch_size=1, eta=0.0, clip_guidance_scale=100, generator=None, output_type="pil", return_dict=True, slerp_latent_style_strength=0.8, slerp_prompt_style_strength=0.1, slerp_clip_image_style_strength=0.1):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
        style_text_input = self.tokenizer(
            style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
'''simple docstring'''
def sum_digits(num):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n=100):
    # iteratively build the numerators of the convergents of the continued
    # fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
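
# Worked example (hand-checked): the numerators of the convergents of e run
# 2, 3, 8, 11, 19, 87, 106, 193, 1264, 1457, ..., so the 10th convergent has
# numerator 1457 and solution(10) == 1 + 4 + 5 + 7 == 17.
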
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A: str = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)

    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
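
# Hand-traced sketch of decompress_data (editor's addition, not from the
# original file): starting from the seed lexicon {"0": "0", "1": "1"}, the
# first bit "1" decodes to "1" (the lexicon is then rebased and extended),
# and the pair "00" then decodes to "0":
#     decompress_data("100")  # -> "10"
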
def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string to the file, packed into whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that a compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Formats a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
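
# Illustrative example (assumed hub cache layout, not from this module): for a
# resolved file such as ".../models--org--repo/snapshots/<commit_sha>/config.json",
# extract_commit_hash returns "<commit_sha>" when it matches REGEX_COMMIT_HASH,
# and None otherwise.
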
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
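
# Worked example (follows directly from the split above): the variant is
# spliced in just before the file extension, e.g.
#     _add_variant("diffusion_pytorch_model.bin", "fp16")
#     # -> "diffusion_pytorch_model.fp16.bin"
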
def _get_model_file(
    pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies,
    resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
"this model name. Check the model page at "
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
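
# Worked example (hand-checked against the estimator above): with n=2 samples
# for one task and c=1 of them correct, pass@1 = 1 - (1 - 1/2) = 0.5:
#     estimate_pass_at_k(np.array([2]), np.array([1]), 1)  # -> array([0.5])
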
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan distance to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
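
# Editor's note (not from the original file): because f_cost above is the
# heuristic alone (g_cost is tracked but never added into the ordering),
# this is greedy best-first search; sorting on g_cost + heuristic instead
# would turn the same loop into A*.
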
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print('------')

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict , encoder_only=False ) -> OrderedDict:
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("""head""" ):
            key = '''segformer.encoder.''' + key
        if key.startswith("""backbone""" ):
            key = key.replace("""backbone""" , """segformer.encoder""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(idx )-1}" )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
            key = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(idx )-1}" )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F"block{idx}" , F"block.{int(idx )-1}" )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F"linear_c{idx}" , F"linear_c.{int(idx )-1}" )
        if key.startswith("""head""" ):
            key = key.replace("""head""" , """classifier""" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict , config ) -> None:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" )
            kv_bias = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" )
            # next, add keys and values (in that order) to the state dict
            state_dict[F"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
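# Note (added for clarity, not from the original converter): the fused `kv`
# projection stacks key weights on top of value weights along dim 0, so with
# hidden size H per stage the split above recovers both matrices:
#   key.weight   = kv_weight[:H, :]    key.bias   = kv_bias[:H]
#   value.weight = kv_weight[H:, :]    value.bias = kv_bias[H:]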
def prepare_img() -> Image.Image:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def __lowercase ( _A , _A , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE : Dict = SegformerConfig()
SCREAMING_SNAKE_CASE : str = False
# set attributes based on model_name
SCREAMING_SNAKE_CASE : Dict = '''huggingface/label-files'''
if "segformer" in model_name:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 150
SCREAMING_SNAKE_CASE : Any = '''ade20k-id2label.json'''
SCREAMING_SNAKE_CASE : List[str] = (1, 150, 128, 128)
elif "city" in model_name:
SCREAMING_SNAKE_CASE : Any = 19
SCREAMING_SNAKE_CASE : int = '''cityscapes-id2label.json'''
SCREAMING_SNAKE_CASE : str = (1, 19, 128, 128)
else:
raise ValueError(F"Model {model_name} not supported" )
elif "mit" in model_name:
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_name[4:6]
SCREAMING_SNAKE_CASE : str = 1000
SCREAMING_SNAKE_CASE : List[Any] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Dict = (1, 1000)
else:
raise ValueError(F"Model {model_name} not supported" )
# set config attributes
SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Any = idalabel
SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
SCREAMING_SNAKE_CASE : List[str] = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE : List[str] = 256
elif size == "b2":
SCREAMING_SNAKE_CASE : Union[str, Any] = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE : Optional[Any] = 768
SCREAMING_SNAKE_CASE : Any = [3, 4, 6, 3]
elif size == "b3":
SCREAMING_SNAKE_CASE : Optional[Any] = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE : Any = 768
SCREAMING_SNAKE_CASE : Tuple = [3, 4, 18, 3]
elif size == "b4":
SCREAMING_SNAKE_CASE : List[Any] = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE : int = 768
SCREAMING_SNAKE_CASE : str = [3, 8, 27, 3]
elif size == "b5":
SCREAMING_SNAKE_CASE : Optional[int] = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE : Dict = 768
SCREAMING_SNAKE_CASE : Union[str, Any] = [3, 6, 40, 3]
else:
raise ValueError(F"Size {size} not supported" )
# load image processor (only resize + normalize)
SCREAMING_SNAKE_CASE : Any = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
# prepare image
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
if encoder_only:
SCREAMING_SNAKE_CASE : Dict = torch.load(UpperCamelCase__ , map_location=torch.device("""cpu""" ) )
else:
SCREAMING_SNAKE_CASE : Any = torch.load(UpperCamelCase__ , map_location=torch.device("""cpu""" ) )['''state_dict''']
# rename keys
SCREAMING_SNAKE_CASE : Tuple = rename_keys(UpperCamelCase__ , encoder_only=UpperCamelCase__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(UpperCamelCase__ , UpperCamelCase__ )
# create HuggingFace model and load state dict
if encoder_only:
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Tuple = SegformerForImageClassification(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = SegformerForSemanticSegmentation(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# forward pass
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
SCREAMING_SNAKE_CASE : int = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.id2label[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
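# Example invocation (illustrative; script name and paths are hypothetical):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-ade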
| 245
|
'''simple docstring'''
from PIL import Image
def change_brightness( img : Image, level : float ):
    '''simple docstring'''
    def brightness(c : int ) -> float:
        return 1_2_8 + level + (c - 1_2_8)
    if not -2_5_5.0 <= level <= 2_5_5.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
    return img.point(brightness )
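# Quick arithmetic check (added for clarity): the shift is linear, so a mid-gray
# pixel c = 128 maps to 128 + level; at level 100 that sends 128 -> 228.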
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 152
| 0
|
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit ) ->list[int]:
    """simple docstring"""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(ceiling = 1_000_000 ) ->int:
    """simple docstring"""
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
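# Cross-check from the Project Euler 50 statement: below one thousand the answer
# is 953, the sum of the 21 consecutive primes starting at 7; solution() finds
# the analogous prime below one million.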
if __name__ == "__main__":
print(F"""{solution() = }""")
| 150
|
"""simple docstring"""
from __future__ import annotations
class Node:
    """simple docstring"""
    def __init__( self , data=None ):
        '''simple docstring'''
        self.data = data
        self.next = None
    def __repr__( self ):
        '''simple docstring'''
        string_rep = []
        temp = self
        while temp:
            string_rep.append(F'''{temp.data}''' )
            temp = temp.next
        return "->".join(string_rep )
def make_linked_list(elements_list ) ->Node:
    """simple docstring"""
    if not elements_list:
        raise Exception("The Elements List is empty" )
    current = head = Node(elements_list[0] )
    for i in range(1 , len(elements_list ) ):
        current.next = Node(elements_list[i] )
        current = current.next
    return head
def print_reverse(head_node ) ->None:
    """simple docstring"""
    if head_node is not None and isinstance(head_node , Node ):
        print_reverse(head_node.next )
        print(head_node.data )
def main() ->None:
    """simple docstring"""
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print("Linked List:" )
    print(linked_list )
    print("Elements in Reverse:" )
    print_reverse(linked_list )
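# Illustrative trace (assuming the functions above): for 14->52->14->12->43,
# print_reverse recurses to the tail before printing, so the output order is
# 43, 12, 14, 52, 14.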
if __name__ == "__main__":
main()
| 150
| 1
|
def check_cycle( graph : dict ) -> bool:
    visited : set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk : set[int] = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search( graph : dict , vertex : int , visited : set , rec_stk : set ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
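# Illustrative usage (hypothetical graphs): an edge back into the recursion
# stack marks a cycle, e.g. check_cycle({0: [1], 1: [2], 2: [0]}) returns True,
# while check_cycle({0: [1], 1: [2], 2: []}) returns False.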
if __name__ == "__main__":
from doctest import testmod
testmod()
| 175
|
"""simple docstring"""
def __UpperCAmelCase ( ) -> int:
'''simple docstring'''
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
        for b in range(a , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
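# Known result (cross-check): the unique triplet is (200, 375, 425), since
# 200**2 + 375**2 = 40000 + 140625 = 425**2 and 200 + 375 + 425 = 1000,
# giving the product 31875000.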
if __name__ == "__main__":
print(f'''{solution() = }''')
| 172
| 0
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> int:
    """simple docstring"""
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ) -> ModelCheckpoint:
    """simple docstring"""
    if metric == "rouge2":
        exp = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        exp = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        exp = """{val_avg_em:.4f}-{step_count}"""
    elif metric == "loss":
        exp = """{val_avg_loss:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            """ function.""" )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f"""val_{metric}""" , mode="""max""" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ) -> EarlyStopping:
    """simple docstring"""
    return EarlyStopping(
        monitor=f"""val_{metric}""" , mode="""min""" if """loss""" in metric else """max""" , patience=patience , verbose=True , )
class _SCREAMING_SNAKE_CASE ( pl.Callback ):
    def on_batch_end( self , trainer , pl_module ):
        lrs = {F"""lr_group_{i}""": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ):
        logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / """test_results.txt"""
            generations_file = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , """a+""" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F"""{key}: {val:.6f}\n"""
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = """\n""".join(metrics["""preds"""] )
            generations_file.open("""w+""" ).write(content )
@rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
    @rank_zero_only
    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , """test""" )
    @rank_zero_only
    def on_validation_end( self , trainer: pl.Trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 368
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez-orangesum-title''': (
            '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''moussaKam/mbarthez''': 10_24,
    '''moussaKam/barthez''': 10_24,
    '''moussaKam/barthez-orangesum-title''': 10_24,
}
SPIECE_UNDERLINE = '''▁'''
class _SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
snake_case__ : Optional[int] = VOCAB_FILES_NAMES
snake_case__ : int = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : Dict = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        self.fairseq_offset = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def _A ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def _A ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def _A ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def _A ( self : List[Any] ):
return len(self.sp_model )
    def _A ( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _A ( self , text: str ):
        return self.sp_model.encode(text , out_type=str )
    def _A ( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _A ( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def _A ( self , tokens ):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _A ( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 62
| 0
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
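# Relation being discretized (standard DDPM identity, noted for context):
# alpha_bar(t) = prod_{s<=t} (1 - beta_s), hence each discrete step satisfies
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta.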
class __lowerCAmelCase (SchedulerMixin , ConfigMixin ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
lowerCAmelCase__ : Any = 2
@register_to_config
    def __init__(self , num_train_timesteps: int = 1000 , beta_start: float = 0.0_00_85 , beta_end: float = 0.0_12 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , prediction_type: str = "epsilon" , use_karras_sigmas: Optional[bool] = False , clip_sample: Optional[bool] = False , clip_sample_range: float = 1.0 , timestep_spacing: str = "linspace" , steps_offset: int = 0 , ):
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='''cosine''' )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='''exp''' )
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
def UpperCamelCase__ (self : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowercase__ = self.timesteps
lowercase__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowercase__ = 1 if len(__lowerCamelCase ) > 1 else 0
else:
lowercase__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
lowercase__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase__ (self : Tuple , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowercase__ = self.index_for_timestep(__lowerCamelCase )
lowercase__ = self.sigmas[step_index]
lowercase__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase__ (self : Dict , UpperCamelCase : int , UpperCamelCase : Union[str, torch.device] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
lowercase__ = num_inference_steps
lowercase__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowercase__ = np.linspace(0 , num_train_timesteps - 1 , __lowerCamelCase , dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowercase__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase__ = (np.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowercase__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase__ = (np.arange(__lowerCamelCase , 0 , -step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
lowercase__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowercase__ = np.log(__lowerCamelCase )
lowercase__ = np.interp(__lowerCamelCase , np.arange(0 , len(__lowerCamelCase ) ) , __lowerCamelCase )
if self.config.use_karras_sigmas:
lowercase__ = self._convert_to_karras(in_sigmas=__lowerCamelCase , num_inference_steps=self.num_inference_steps )
lowercase__ = np.array([self._sigma_to_t(__lowerCamelCase , __lowerCamelCase ) for sigma in sigmas] )
lowercase__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowercase__ = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
lowercase__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowercase__ = torch.from_numpy(__lowerCamelCase )
lowercase__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__lowerCamelCase ).startswith('''mps''' ):
# mps does not support float64
lowercase__ = timesteps.to(__lowerCamelCase , dtype=torch.floataa )
else:
lowercase__ = timesteps.to(device=__lowerCamelCase )
# empty dt and derivative
lowercase__ = None
lowercase__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowercase__ = defaultdict(__lowerCamelCase )
def UpperCamelCase__ (self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = np.log(__lowerCamelCase )
# get distribution
lowercase__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowercase__ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowercase__ = low_idx + 1
lowercase__ = log_sigmas[low_idx]
lowercase__ = log_sigmas[high_idx]
# interpolate sigmas
lowercase__ = (low - log_sigma) / (low - high)
lowercase__ = np.clip(__lowerCamelCase , 0 , 1 )
# transform interpolation to time range
lowercase__ = (1 - w) * low_idx + w * high_idx
lowercase__ = t.reshape(sigma.shape )
return t
def UpperCamelCase__ (self : str , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Any ):
'''simple docstring'''
lowercase__ = in_sigmas[-1].item()
lowercase__ = in_sigmas[0].item()
lowercase__ = 7.0 # 7.0 is the value used in the paper
lowercase__ = np.linspace(0 , 1 , __lowerCamelCase )
lowercase__ = sigma_min ** (1 / rho)
lowercase__ = sigma_max ** (1 / rho)
lowercase__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
return self.dt is None
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : Union[torch.FloatTensor, np.ndarray] , UpperCamelCase : Union[float, torch.FloatTensor] , UpperCamelCase : Union[torch.FloatTensor, np.ndarray] , UpperCamelCase : bool = True , ):
'''simple docstring'''
lowercase__ = self.index_for_timestep(__lowerCamelCase )
# advance index counter by 1
lowercase__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowercase__ = self.sigmas[step_index]
lowercase__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowercase__ = self.sigmas[step_index - 1]
lowercase__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowercase__ = 0
lowercase__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowercase__ = sigma_hat if self.state_in_first_order else sigma_next
lowercase__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = sigma_hat if self.state_in_first_order else sigma_next
lowercase__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowercase__ = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
lowercase__ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowercase__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowercase__ = sigma_next - sigma_hat
# store for 2nd order step
lowercase__ = derivative
lowercase__ = dt
lowercase__ = sample
else:
# 2. 2nd order / Heun's method
lowercase__ = (sample - pred_original_sample) / sigma_next
lowercase__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowercase__ = self.dt
lowercase__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCamelCase )
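# Heun's method in brief (explanatory note, not original code): the first-order
# pass takes an Euler step with slope d1 = (sample - x0_pred) / sigma_hat; the
# second pass re-evaluates the slope at the endpoint and averages, effectively
# x_prev = x + dt * (d1 + d2) / 2, which is what makes the scheduler 2nd order.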
def UpperCamelCase__ (self : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : torch.FloatTensor , UpperCamelCase : torch.FloatTensor , ):
'''simple docstring'''
lowercase__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ):
# mps does not support float64
lowercase__ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowercase__ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowercase__ = self.timesteps.to(original_samples.device )
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = [self.index_for_timestep(__lowerCamelCase , __lowerCamelCase ) for t in timesteps]
lowercase__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowercase__ = sigma.unsqueeze(-1 )
lowercase__ = original_samples + noise * sigma
return noisy_samples
def __len__(self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
| 2
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _a :
"""simple docstring"""
    def __init__( self , short_edge_length , max_size=sys.maxsize ):
        '''simple docstring'''
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self , imgs ):
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h , w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            if max(newh , neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
class _a :
"""simple docstring"""
    def __init__( self , cfg ):
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x : (x - self.pixel_mean) / self.pixel_std
    def pad( self , images ):
        '''simple docstring'''
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
    def __call__( self , images , single_image=False ):
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes ,scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box( tensor ,box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 ,max=w)
    tensor[:, 1].clamp_(min=0 ,max=h)
    tensor[:, 2].clamp_(min=0 ,max=w)
    tensor[:, 3].clamp_(min=0 ,max=h)
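# Usage sketch (illustrative; boxes assumed to be (N, 4) xyxy tensors):
#   boxes = _scale_box(boxes, scales_yx)   # undo the resize applied by Preprocess
#   _clip_box(boxes, (raw_h, raw_w))       # clamp coordinates into the raw image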
| 149
| 0
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt : str , class_data_dir : str , num_class_images : int ) -> None:
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(f'''{class_data_dir}/images''' , exist_ok=True )
    if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc='''downloading real regularization images''' , total=num_class_images )
    with open(f'''{class_data_dir}/caption.txt''' , '''w''' ) as fa, open(f'''{class_data_dir}/urls.txt''' , '''w''' ) as fa_urls, open(
        f'''{class_data_dir}/images.txt''' , '''w''' ) as fa_images:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['''url'''] )
                if img.status_code == 2_00:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f'''{class_data_dir}/images/{total}.jpg''' , '''wb''' ) as f:
                        f.write(img.content )
                    fa.write(images['''caption'''] + '''\n''' )
                    fa_urls.write(images['''url'''] + '''\n''' )
                    fa_images.write(f'''{class_data_dir}/images/{total}.jpg''' + '''\n''' )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser('''''' , add_help=False )
    parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=True , type=str )
    parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=True , type=str )
    parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 354
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 5_1_2}
class _SCREAMING_SNAKE_CASE( PreTrainedTokenizer ):
SCREAMING_SNAKE_CASE_ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__="<s>" ,SCREAMING_SNAKE_CASE__="</s>" ,SCREAMING_SNAKE_CASE__="<unk>" ,SCREAMING_SNAKE_CASE__="<pad>" ,SCREAMING_SNAKE_CASE__="<::::>" ,SCREAMING_SNAKE_CASE__ = None ,**SCREAMING_SNAKE_CASE__ ,) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__SCREAMING_SNAKE_CASE :Dict = vocab_file
__SCREAMING_SNAKE_CASE :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__( self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self ,d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _UpperCamelCase ( self ,text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text ,out_type=str )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
        token = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
        return token
    def _UpperCamelCase ( self ,tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def _UpperCamelCase ( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 239
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=DummyObject):
snake_case__ : List[Any] = ["note_seq"]
def __init__( self : str , *__lowerCAmelCase : int , **__lowerCAmelCase : str ):
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , *__lowerCAmelCase : str , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Dict ):
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
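# Behavioural note (assumed from the DummyObject pattern): constructing this
# stub, or calling its classmethods, without `note_seq` installed raises an
# ImportError via requires_backends pointing at the missing backend.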
| 72
|
"""simple docstring"""
def simplify( A_ : list[list] ):
    '''simple docstring'''
    # Divide each row by the magnitude of its first term --> creates 'unit' matrix
    duplicate_set = A_.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0, current_first_column[i] )
        resultant.insert(0, current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous( equations : list[list] ):
    '''simple docstring'''
    if len(equations ) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    for row in equations:
        if any(not isinstance(column, (int, float) ) for column in row ):
            raise ValueError('''solve_simultaneous() requires lists of integers''' )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
        data_set.insert(0, full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item, 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
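    # A quick extra check (a hedged addition, not in the original script): rows
    # use the same [coefficients..., constant] layout as `eq` above, so
    # 2x + y = 7 and x + 3y = 11 should solve to x = 2, y = 3.
    print(solve_simultaneous([[2, 1, 7], [1, 3, 11]]))  # expected: [2.0, 3.0]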
| 72
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 368
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput ( BaseOutput ):
    latents: torch.FloatTensor
class VQModel ( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 256 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.18215 , norm_type = "group" , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1)
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode( self , x , return_dict = True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)
    @apply_forward_hook
    def decode( self , h , force_not_quantize = False , return_dict = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2 , quant if self.config.norm_type == """spatial""" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward( self , sample , return_dict = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 283
| 0
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word : str ) -> str:
    return "".join(sorted(word ) )
def anagram( my_word : str ) -> list[str]:
    return word_by_signature[signature(my_word )]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
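# A tiny self-contained illustration of the signature trick above (hypothetical
# words, independent of words.txt): anagrams share one sorted-letter signature.
assert signature("listen") == signature("silent") == "eilnst"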
| 225
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase ( PretrainedConfig ):
    model_type = "megatron-bert"
    def __init__(self , vocab_size=2_90_56 , hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=40_96 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
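# A hedged usage sketch (assuming this class mirrors transformers'
# MegatronBertConfig, as the "megatron-bert" model_type suggests): the config is
# a plain value container, so overriding a couple of fields is enough.
_demo_config = UpperCAmelCase(hidden_size=512, num_hidden_layers=8)
assert _demo_config.hidden_size == 512 and _demo_config.use_cache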
| 59
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase ( PipelineTool ):
    default_checkpoint = 'Salesforce/blip-image-captioning-base'
    description = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    name = 'image_captioner'
    model_class = AutoModelForVision2Seq
    inputs = ['image']
    outputs = ['text']
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['vision'] )
super().__init__(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
return self.pre_processor(images=__UpperCamelCase ,return_tensors='pt' )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return self.model.generate(**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return self.pre_processor.batch_decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase )[0].strip()
| 321
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids ,list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
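# A minimal standalone sketch of the API exercised above: feeding all tokens of
# one branch completes the constraint (this mirrors the tests; nothing new).
if is_torch_available():
    _dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for _token in (1, 2, 4):
        _stepped, _completed, _reset = _dc.update(_token)
    assert _completed and _dc.current_seq == [1, 2, 4]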
| 321
| 1
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester ( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ) -> None:
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict ( self ):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp ( self ) -> None:
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
    @property
    def image_processor_dict ( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties ( self ) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "clusters" ) )
        self.assertTrue(hasattr(image_processor , "do_resize" ) )
        self.assertTrue(hasattr(image_processor , "size" ) )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
    def test_image_processor_from_dict_with_kwargs ( self ) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def test_image_processor_to_json_string ( self ) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def test_image_processor_to_json_file ( self ) -> None:
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "image_processor.json" )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    def test_image_processor_from_and_save_pretrained ( self ) -> None:
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
@unittest.skip("ImageGPT requires clusters at initialization" )
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
| 76
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
a_ = HfArgumentParser(PretokenizationArguments)
a_ = parser.parse_args()
if args.num_workers is None:
a_ = multiprocessing.cpu_count()
a_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a_ = time.time()
a_ = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a_ = time.time()
a_ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
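# A hedged offline sketch (uses the tokenizer loaded above; the field names
# match the `tokenize` function): characters-per-token ratio for a toy example.
_demo_out = tokenize({"content": "def add(a, b):\n    return a + b\n"})
print(len(_demo_out["input_ids"]), _demo_out["ratio_char_token"])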
| 76
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + """/user"""
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("""USER_TOKEN""", """""")
def fetch_github_info(auth_token: str) ->dict[Any, Any]:
    """simple docstring"""
    headers = {
        "Authorization": f'''token {auth_token}''',
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
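# A hedged usage sketch ("login" and "public_repos" are standard keys of
# GitHub's /user response; the helper below is hypothetical, not part of the
# original script):
def demo_fetch(token: str) -> None:
    info = fetch_github_info(token)
    print(info.get("login"), info.get("public_repos"))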
| 150
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Union[str, Any] = False
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def UpperCAmelCase_ ( self ):
        '''simple docstring'''
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def UpperCAmelCase_ ( self ):
        '''simple docstring'''
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 150
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
        vocab_tokens = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab ) + "\n" )
    def get_tokenizer ( self , **kwargs ):
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self , tokenizer ):
        """simple docstring"""
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
def snake_case ( self ):
"""simple docstring"""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_2 = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_2 )
                tokens_2 = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_2 ) , 0 )
                text_2 = tokenizer.decode(ids )
                self.assertIsInstance(text_2 , str )
                self.assertEqual(text_2.replace(" " , "" ) , output_text )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case ( self ):
"""simple docstring"""
pass
| 55
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__( self ,pos_x: int ,pos_y: int ,goal_x: int ,goal_y: int ,parent: Node | None ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__( self ,start: tuple[int, int] ,goal: tuple[int, int] ):
        self.start = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,None )
        self.target = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,None )
        self.node_queue = [self.start]
        self.reached = False
    def search( self ) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self ,parent: Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x ,pos_y ,self.target.pos_y ,self.target.pos_x ,parent ))
        return successors
    def retrace_path( self ,node: Node | None ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__( self ,start ,goal ):
        self.fwd_bfs = BreadthFirstSearch(start ,goal )
        self.bwd_bfs = BreadthFirstSearch(goal ,start )
        self.reached = False
    def search( self ) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node ,current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path( self ,fwd_node: Node ,bwd_node: Node ) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("""Unidirectional BFS computation time : """, bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("""Bidirectional BFS computation time : """, bd_bfs_time)
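    # A hedged sanity check (an addition; assumes the grid above has a route):
    # both searches return a list of (y, x) tuples running from init to goal.
    assert path is not None and path[0] == init and path[-1] == goal
    assert bd_path is not None and bd_path[0] == init and bd_path[-1] == goal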
| 73
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mctct'''] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
A_ :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 245
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits : int = 3 ) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits ,str ):
        raise TypeError('number of qubits must be a integer.' )
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.' )
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).' )
    qr = QuantumRegister(number_of_qubits ,'qr' )
    cr = ClassicalRegister(number_of_qubits ,'cr' )
    quantum_circuit = QuantumCircuit(qr ,cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) ,j ,counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k ,number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr ,cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator' )
    job = execute(quantum_circuit ,backend ,shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
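# A hedged usage sketch (runs on the same Aer simulator imported above): the
# QFT of |0...0> is a uniform superposition, so each of the 2**n bitstrings
# should receive roughly shots / 2**n of the 10_000 counts.
def demo_qft_uniformity(n: int = 2) -> None:  # hypothetical helper
    counts = quantum_fourier_transform(n)
    assert sum(counts.values()) == 10_000  # all shots accounted for
    print({key: value / 10_000 for key, value in counts.items()})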
| 245
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase_ ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'dpr'
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim = 0 , **kwargs , ) ->None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
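# A hedged usage sketch (assuming this class mirrors transformers' DPRConfig):
# projection_dim = 0 means the encoders return the full hidden_size vector.
_demo_dpr = UpperCamelCase_(projection_dim=128)
assert _demo_dpr.projection_dim == 128 and _demo_dpr.hidden_size == 768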
| 14
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ctrl'''] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_ctrl'''] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 337
| 0
|
"""simple docstring"""
import numpy as np
class Cell:
    def __init__( self ) -> None:
        '''simple docstring'''
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__( self , cell ) -> bool:
        '''simple docstring'''
        return self.position == cell.position
    def showcell( self ) -> None:
        '''simple docstring'''
        print(self.position )
class Gridworld:
    def __init__( self , world_size=(5, 5) ) -> None:
        '''simple docstring'''
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show( self ) -> None:
        '''simple docstring'''
        print(self.w )
    def get_neigbours( self , cell ):
        '''simple docstring'''
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def astar( world , start , goal ):
    """simple docstring"""
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f'''path from {start.position} to {goal.position}''')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
print(world.w)
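    # A hedged sanity check (an addition, not in the original script): with g
    # incremented per step and h the squared distance to the goal, this is A*
    # on an 8-connected grid, and the path runs from start to goal inclusive.
    assert s[0] == start.position and s[-1] == goal.position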
| 367
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> None:
"""simple docstring"""
with open(lowercase ) as metadata_file:
snake_case : str = json.load(lowercase )
snake_case : Optional[Any] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
snake_case : Tuple = torch.load(lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
snake_case : Optional[Any] = load_original_entity_vocab(lowercase )
# add an entry for [MASK2]
snake_case : Dict = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case : Tuple = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase )
snake_case : str = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowercase )
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f:
snake_case : str = json.load(lowercase )
snake_case : List[str] = "MLukeTokenizer"
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase , lowercase )
snake_case : Dict = MLukeTokenizer.from_pretrained(lowercase )
# Initialize the embeddings of the special tokens
snake_case : Tuple = tokenizer.convert_tokens_to_ids(["@"] )[0]
snake_case : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
snake_case : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
snake_case : str = word_emb[ent_init_index].unsqueeze(0 )
snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
snake_case : Tuple = state_dict[bias_name]
snake_case : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
snake_case : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case : Optional[int] = F'encoder.layer.{layer_index}.attention.self.'
snake_case : Optional[Any] = state_dict[prefix + matrix_name]
snake_case : Optional[Any] = state_dict[prefix + matrix_name]
snake_case : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case : List[Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
snake_case : str = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
snake_case : Tuple = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case : Optional[int] = state_dict["entity_predictions.bias"]
snake_case : Optional[int] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
snake_case : Union[str, Any] = LukeForMaskedLM(config=lowercase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
snake_case : Tuple = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
snake_case : Any = state_dict[key]
else:
snake_case : Tuple = state_dict[key]
snake_case ,snake_case : Optional[Any] = model.load_state_dict(lowercase , strict=lowercase )
if set(lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
snake_case : Optional[Any] = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" )
snake_case : List[str] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
snake_case : str = (0, 9)
snake_case : Union[str, Any] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
snake_case : int = model(**lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : int = torch.Size((1, 33, 768) )
snake_case : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : Any = torch.Size((1, 1, 768) )
snake_case : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
snake_case : List[str] = MLukeTokenizer.from_pretrained(lowercase )
snake_case : List[Any] = "Tokyo is the capital of <mask>."
snake_case : Optional[Any] = (24, 30)
snake_case : List[str] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
snake_case : Any = model(**lowercase )
snake_case : int = encoding["input_ids"][0].tolist()
snake_case : str = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
snake_case : Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase )
snake_case : Tuple = outputs.entity_logits[0][0].argmax().item()
snake_case : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase ) )
model.save_pretrained(lowercase )
def load_original_entity_vocab( entity_vocab_path ) -> dict:
    """simple docstring"""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F'{language}:{entity_name}'] = entity_id
    return new_mapping
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
__snake_case = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 112
| 0
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__lowercase = logging.get_logger(__name__)
# General docstring
__lowercase = '''RegNetConfig'''
# Base docstring
__lowercase = '''facebook/regnet-y-040'''
__lowercase = [1, 1088, 7, 7]
# Image classification docstring
__lowercase = '''facebook/regnet-y-040'''
__lowercase = '''tabby, tabby cat'''
__lowercase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase = 3 , __lowercase = 1 , __lowercase = 1 , __lowercase = "relu" , **__lowercase , ) -> Dict:
super().__init__(**__lowercase)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        __UpperCamelCase :Optional[int] = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        __UpperCamelCase :Any = tf.keras.layers.Conv2D(
            filters=__lowercase , kernel_size=__lowercase , strides=__lowercase , padding='''VALID''' , groups=__lowercase , use_bias=__lowercase , name='''convolution''' , )
        __UpperCamelCase :Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''')
        __UpperCamelCase :List[str] = ACT2FN[activation] if activation is not None else tf.identity
def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
__UpperCamelCase :Optional[int] = self.convolution(self.padding(__lowercase))
__UpperCamelCase :Optional[int] = self.normalization(__lowercase)
__UpperCamelCase :Tuple = self.activation(__lowercase)
return hidden_state
class TFRegNetEmbeddings ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __lowercase , **__lowercase) -> int:
super().__init__(**__lowercase)
__UpperCamelCase :Tuple = config.num_channels
__UpperCamelCase :Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def UpperCamelCase__ ( self , __lowercase) -> Dict:
__UpperCamelCase :Dict = shape_list(__lowercase)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCamelCase :Dict = tf.transpose(__lowercase , perm=(0, 2, 3, 1))
__UpperCamelCase :Optional[int] = self.embedder(__lowercase)
return hidden_state
class TFRegNetShortCut ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase = 2 , **__lowercase) -> Optional[Any]:
super().__init__(**__lowercase)
        __UpperCamelCase :Union[str, Any] = tf.keras.layers.Conv2D(
filters=__lowercase , kernel_size=1 , strides=__lowercase , use_bias=__lowercase , name='''convolution''')
__UpperCamelCase :int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''')
def UpperCamelCase__ ( self , __lowercase , __lowercase = False) -> tf.Tensor:
return self.normalization(self.convolution(__lowercase) , training=__lowercase)
class TFRegNetSELayer ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , **__lowercase) -> Union[str, Any]:
super().__init__(**__lowercase)
        __UpperCamelCase :int = tf.keras.layers.GlobalAveragePooling2D(keepdims=__lowercase , name='''pooler''')
        __UpperCamelCase :List[Any] = [
            tf.keras.layers.Conv2D(filters=__lowercase , kernel_size=1 , activation='''relu''' , name='''attention.0'''),
            tf.keras.layers.Conv2D(filters=__lowercase , kernel_size=1 , activation='''sigmoid''' , name='''attention.2'''),
]
def UpperCamelCase__ ( self , __lowercase) -> Any:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCamelCase :List[str] = self.pooler(__lowercase)
for layer_module in self.attention:
__UpperCamelCase :List[str] = layer_module(__lowercase)
__UpperCamelCase :int = hidden_state * pooled
return hidden_state
class TFRegNetXLayer ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = 1 , **__lowercase) -> Tuple:
super().__init__(**__lowercase)
__UpperCamelCase :str = in_channels != out_channels or stride != 1
__UpperCamelCase :Any = max(1 , out_channels // config.groups_width)
__UpperCamelCase :Optional[Any] = (
TFRegNetShortCut(__lowercase , stride=__lowercase , name='''shortcut''')
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''')
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCamelCase :Optional[int] = [
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=config.hidden_act , name='''layer.0'''),
TFRegNetConvLayer(
__lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act , name='''layer.1'''),
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=__lowercase , name='''layer.2'''),
]
        __UpperCamelCase :Dict = ACT2FN[config.hidden_act]
def UpperCamelCase__ ( self , __lowercase) -> Any:
__UpperCamelCase :Optional[int] = hidden_state
for layer_module in self.layers:
__UpperCamelCase :Any = layer_module(__lowercase)
__UpperCamelCase :Tuple = self.shortcut(__lowercase)
hidden_state += residual
__UpperCamelCase :Optional[Any] = self.activation(__lowercase)
return hidden_state
class TFRegNetYLayer ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase = 1 , **__lowercase) -> Union[str, Any]:
super().__init__(**__lowercase)
__UpperCamelCase :int = in_channels != out_channels or stride != 1
__UpperCamelCase :Optional[int] = max(1 , out_channels // config.groups_width)
__UpperCamelCase :Dict = (
TFRegNetShortCut(__lowercase , stride=__lowercase , name='''shortcut''')
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''')
)
__UpperCamelCase :Dict = [
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=config.hidden_act , name='''layer.0'''),
TFRegNetConvLayer(
__lowercase , stride=__lowercase , groups=__lowercase , activation=config.hidden_act , name='''layer.1'''),
TFRegNetSELayer(__lowercase , reduced_channels=int(round(in_channels / 4)) , name='''layer.2'''),
TFRegNetConvLayer(__lowercase , kernel_size=1 , activation=__lowercase , name='''layer.3'''),
]
        __UpperCamelCase :Dict = ACT2FN[config.hidden_act]
def UpperCamelCase__ ( self , __lowercase) -> Optional[int]:
__UpperCamelCase :str = hidden_state
for layer_module in self.layers:
__UpperCamelCase :Optional[Any] = layer_module(__lowercase)
__UpperCamelCase :List[Any] = self.shortcut(__lowercase)
hidden_state += residual
__UpperCamelCase :List[str] = self.activation(__lowercase)
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
@unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
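# --- Usage sketch (not part of the original module) ---------------------------
# Classify one image with the TF RegNet classification head defined above. The
# checkpoint name "facebook/regnet-y-040" and the image path are illustrative
# assumptions; any TF RegNet checkpoint on the Hub should work the same way.
#
# from PIL import Image
# from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
# image = Image.open("cat.jpg")
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#
# inputs = processor(images=image, return_tensors="tf")
# logits = model(**inputs).logits
# predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
# print(model.config.id2label[predicted_class])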
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build the train/eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train BERT on MRPC, retrying with smaller batch sizes if we run out of memory."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
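# --- Conceptual sketch (not the real implementation) --------------------------
# What `find_executable_batch_size` does under the hood, simplified: retry the
# wrapped function, halving the batch size on every out-of-memory error. The
# real decorator lives in `accelerate.utils` and handles more error types.
#
# def find_executable_batch_size_sketch(function, starting_batch_size=128):
#     def wrapper(*args, **kwargs):
#         batch_size = starting_batch_size
#         while True:
#             if batch_size == 0:
#                 raise RuntimeError("No executable batch size found, reached zero.")
#             try:
#                 return function(batch_size, *args, **kwargs)
#             except RuntimeError as e:
#                 if "out of memory" in str(e):
#                     batch_size //= 2  # e.g. 128 -> 64 -> 32 -> 16 -> 8
#                 else:
#                     raise
#     return wrapper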
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus, distributed_retriever="pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(F'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
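# These tests can be run directly with pytest, e.g. (the path is illustrative,
# adjust it to where this file lives in your checkout):
#   pytest examples/research_projects/rag/_test_finetune_rag.py -k finetune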
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_UpperCamelCase : List[str] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : List[str] , A : Union[str, Any] , A : List[Any]="<s>" , A : Dict="</s>" , A : List[Any]="</s>" , A : Any="<s>" , A : Dict="<unk>" , A : Any="<pad>" , A : Optional[int]="<mask>" , A : str=None , A : Tuple=None , A : List[str]=None , A : Optional[Dict[str, Any]] = None , A : Any=None , A : List[Any]=False , **A : Tuple , ):
'''simple docstring'''
a : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
a : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , tokenizer_file=A , src_lang=A , tgt_lang=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=A , **A , )
a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
a : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
a : List[str] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a : Any = 1
a : int = len(self.sp_model )
a : Optional[int] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A )
}
a : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
a : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
a : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
a : Optional[int] = src_lang if src_lang is not None else 'eng_Latn'
a : List[Any] = self.lang_code_to_id[self._src_lang]
a : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ):
'''simple docstring'''
a : Dict = self.__dict__.copy()
a : int = None
a : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , A : Any ):
'''simple docstring'''
a : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a : Any = {}
a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
def lowerCamelCase__ ( self : Optional[int] , A : List[int] , A : Optional[List[int]] = None , A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
a : Tuple = [1] * len(self.prefix_tokens )
a : int = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A )) + suffix_ones
return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def lowerCamelCase__ ( self : Any , A : List[int] , A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ ( self : Optional[int] , A : List[int] , A : Optional[List[int]] = None ):
'''simple docstring'''
a : List[str] = [self.sep_token_id]
a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : List[str] , A : Optional[int] , A : str , A : Optional[str] , A : Optional[str] , **A : str ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
a : Any = src_lang
a : Any = self(A , add_special_tokens=A , return_tensors=A , **A )
a : Tuple = self.convert_tokens_to_ids(A )
a : Optional[Any] = tgt_lang_id
return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
def lowerCamelCase__ ( self : Any , A : str ):
'''simple docstring'''
return self.sp_model.encode(A , out_type=A )
def lowerCamelCase__ ( self : Union[str, Any] , A : Tuple ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a : int = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ ( self : Tuple , A : List[str] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self, src_texts, src_lang: str = "eng_Latn", tgt_texts=None, tgt_lang: str = "fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            # legacy behaviour: no prefix, suffix = [eos, src_lang_code]
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            # default: prefix = [src_lang_code], suffix = [eos]
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
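# --- Usage sketch (not part of the original module) ---------------------------
# Tokenize for English -> French translation with the tokenizer above. Loading a
# translation model is assumed and not shown here.
#
# tokenizer = NllbTokenizer.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
# )
# inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
# # With `legacy_behaviour=False` (the default above), `input_ids` starts with the
# # source language code and ends with </s>; in legacy mode the code follows </s>.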
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update `version` in `fname`, using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded, examples included unless this is a patch."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point the README model list at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package `__init__`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps: figure out the next version and update everything."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps: bump to the next dev version and update everything."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
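# Typical invocations (the script path `utils/release.py` is an assumption about
# where this file lives in the repository):
#   python utils/release.py                 # pre-release: bump to the release version
#   python utils/release.py --patch         # pre-release: bump to a patch version
#   python utils/release.py --post_release  # post-release: bump to the next dev version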
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class snake_case :
def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=0.2 , UpperCamelCase__ : Any=0.2)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = bp_numa
__lowerCAmelCase: Optional[int] = bp_numa
__lowerCAmelCase: Tuple = bp_numa
__lowerCAmelCase: Optional[int] = conva_get[:2]
__lowerCAmelCase: int = conva_get[2]
__lowerCAmelCase: List[str] = size_pa
__lowerCAmelCase: Tuple = rate_w
__lowerCAmelCase: Dict = rate_t
__lowerCAmelCase: List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
__lowerCAmelCase: Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
__lowerCAmelCase: int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
__lowerCAmelCase: Optional[Any] = -2 * np.random.rand(self.conva[1]) + 1
__lowerCAmelCase: int = -2 * np.random.rand(self.num_bpa) + 1
__lowerCAmelCase: str = -2 * np.random.rand(self.num_bpa) + 1
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : int)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Any = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(UpperCamelCase__ , "wb") as f:
pickle.dump(UpperCamelCase__ , UpperCamelCase__)
print(f"Model saved: {save_path}")
@classmethod
def lowercase_ ( cls : Dict , UpperCamelCase__ : Union[str, Any])-> List[Any]:
'''simple docstring'''
with open(UpperCamelCase__ , "rb") as f:
__lowerCAmelCase: Dict = pickle.load(UpperCamelCase__) # noqa: S301
__lowerCAmelCase: Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
__lowerCAmelCase: List[str] = model_dic.get("size_pooling1")
__lowerCAmelCase: Union[str, Any] = model_dic.get("num_bp1")
__lowerCAmelCase: Any = model_dic.get("num_bp2")
__lowerCAmelCase: Union[str, Any] = model_dic.get("num_bp3")
__lowerCAmelCase: Optional[int] = model_dic.get("rate_weight")
__lowerCAmelCase: int = model_dic.get("rate_thre")
# create model instance
__lowerCAmelCase: Tuple = CNN(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
# modify model parameter
__lowerCAmelCase: Any = model_dic.get("w_conv1")
__lowerCAmelCase: Optional[Any] = model_dic.get("wkj")
__lowerCAmelCase: Any = model_dic.get("vji")
__lowerCAmelCase: Dict = model_dic.get("thre_conv1")
__lowerCAmelCase: int = model_dic.get("thre_bp2")
__lowerCAmelCase: Optional[int] = model_dic.get("thre_bp3")
return conv_ins
def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any])-> List[Any]:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x))
def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any])-> Optional[Any]:
'''simple docstring'''
return round(UpperCamelCase__ , 3)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int)-> Dict:
'''simple docstring'''
__lowerCAmelCase: List[Any] = convs[0]
__lowerCAmelCase: int = convs[1]
__lowerCAmelCase: Union[str, Any] = np.shape(UpperCamelCase__)[0]
# get the data slice of original image data, data_focus
__lowerCAmelCase: Optional[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__):
__lowerCAmelCase: Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCamelCase__)
# calculate the feature map of every single kernel, and saved as list of matrix
__lowerCAmelCase: int = []
__lowerCAmelCase: Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(UpperCamelCase__):
__lowerCAmelCase: List[str] = []
for i_focus in range(len(UpperCamelCase__)):
__lowerCAmelCase: Union[str, Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCamelCase__))
__lowerCAmelCase: str = np.asmatrix(UpperCamelCase__).reshape(
UpperCamelCase__ , UpperCamelCase__)
data_featuremap.append(UpperCamelCase__)
        # expanding the data slice to one dimension
__lowerCAmelCase: Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCamelCase__))
__lowerCAmelCase: List[Any] = np.asarray(UpperCamelCase__)
return focus_list, data_featuremap
def lowercase_ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]="average_pool")-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = len(featuremaps[0])
__lowerCAmelCase: List[Any] = int(size_map / size_pooling)
__lowerCAmelCase: int = []
for i_map in range(len(UpperCamelCase__)):
__lowerCAmelCase: str = featuremaps[i_map]
__lowerCAmelCase: List[Any] = []
for i_focus in range(0 , UpperCamelCase__ , UpperCamelCase__):
for j_focus in range(0 , UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCamelCase__))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCamelCase__))
__lowerCAmelCase: Optional[int] = np.asmatrix(UpperCamelCase__).reshape(UpperCamelCase__ , UpperCamelCase__)
featuremap_pooled.append(UpperCamelCase__)
return featuremap_pooled
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str)-> int:
'''simple docstring'''
__lowerCAmelCase: List[Any] = []
for i in range(len(UpperCamelCase__)):
__lowerCAmelCase: Union[str, Any] = np.shape(data[i])
__lowerCAmelCase: int = data[i].reshape(1 , shapes[0] * shapes[1])
__lowerCAmelCase: Dict = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCamelCase__)
__lowerCAmelCase: Any = np.asarray(UpperCamelCase__)
return data_expanded
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = np.asarray(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = np.shape(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def lowercase_ ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: Any = 0
for i_map in range(UpperCamelCase__):
__lowerCAmelCase: Optional[Any] = np.ones((size_map, size_map))
for i in range(0 , UpperCamelCase__ , UpperCamelCase__):
for j in range(0 , UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: Optional[Any] = pd_pool[
i_pool
]
__lowerCAmelCase: str = i_pool + 1
__lowerCAmelCase: Dict = np.multiply(
UpperCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(UpperCamelCase__)
return pd_all
def lowercase_ ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str=bool)-> List[str]:
'''simple docstring'''
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(UpperCamelCase__)))
print((" - - Shape: Teach_Data ", np.shape(UpperCamelCase__)))
__lowerCAmelCase: str = 0
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: List[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
__lowerCAmelCase: Optional[Any] = 0
print(f"-------------Learning Time {rp}--------------")
for p in range(len(UpperCamelCase__)):
# print('------------Learning Image: %d--------------'%p)
__lowerCAmelCase: Dict = np.asmatrix(datas_train[p])
__lowerCAmelCase: Dict = np.asarray(datas_teach[p])
__lowerCAmelCase , __lowerCAmelCase: int = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase: Any = self.pooling(UpperCamelCase__ , self.size_poolinga)
__lowerCAmelCase: Optional[Any] = np.shape(UpperCamelCase__)
__lowerCAmelCase: str = self._expand(UpperCamelCase__)
__lowerCAmelCase: str = data_bp_input
__lowerCAmelCase: int = np.dot(UpperCamelCase__ , self.vji.T) - self.thre_bpa
__lowerCAmelCase: int = self.sig(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = np.dot(UpperCamelCase__ , self.wkj.T) - self.thre_bpa
__lowerCAmelCase: str = self.sig(UpperCamelCase__)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
__lowerCAmelCase: Union[str, Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCamelCase__ , (1 - bp_outa)))
__lowerCAmelCase: Any = np.multiply(
np.dot(UpperCamelCase__ , self.wkj) , np.multiply(UpperCamelCase__ , (1 - bp_outa)))
__lowerCAmelCase: str = np.dot(UpperCamelCase__ , self.vji)
__lowerCAmelCase: Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
__lowerCAmelCase: str = pd_conva_pooled.T.getA().tolist()
__lowerCAmelCase: str = self._calculate_gradient_from_pool(
UpperCamelCase__ , UpperCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
__lowerCAmelCase: List[Any] = self._expand_mat(pd_conva_all[k_conv])
__lowerCAmelCase: int = self.rate_weight * np.dot(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
__lowerCAmelCase: Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
__lowerCAmelCase: List[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__lowerCAmelCase: Union[str, Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__lowerCAmelCase: Tuple = self.thre_bpa - pd_k_all * self.rate_thre
__lowerCAmelCase: Optional[int] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
__lowerCAmelCase: List[str] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__lowerCAmelCase: Tuple = rp + 1
__lowerCAmelCase: Optional[Any] = error_count / patterns
all_mse.append(UpperCamelCase__)
def draw_error():
__lowerCAmelCase: Dict = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(UpperCamelCase__ , "+-")
plt.plot(UpperCamelCase__ , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(UpperCamelCase__ , alpha=0.5)
plt.show()
        print("------------------Training Complete---------------------")
print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Tuple)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: int = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(UpperCamelCase__)))
for p in range(len(UpperCamelCase__)):
__lowerCAmelCase: Dict = np.asmatrix(datas_test[p])
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase: Tuple = self.pooling(UpperCamelCase__ , self.size_poolinga)
__lowerCAmelCase: List[str] = self._expand(UpperCamelCase__)
__lowerCAmelCase: int = data_bp_input
__lowerCAmelCase: List[Any] = bp_outa * self.vji.T - self.thre_bpa
__lowerCAmelCase: Any = self.sig(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = bp_outa * self.wkj.T - self.thre_bpa
__lowerCAmelCase: List[str] = self.sig(UpperCamelCase__)
produce_out.extend(bp_outa.getA().tolist())
__lowerCAmelCase: Tuple = [list(map(self.do_round , UpperCamelCase__)) for each in produce_out]
return np.asarray(UpperCamelCase__)
def lowercase_ ( self : int , UpperCamelCase__ : Any)-> Any:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = np.asmatrix(UpperCamelCase__)
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase: Any = self.pooling(UpperCamelCase__ , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (taxicab) distance between two n-dimensional points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise a TypeError/ValueError if the input is not a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same result as `manhattan_distance`, computed in a single expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
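# Worked example: for the points (1, 1) and (4, 5), the Manhattan distance is
# |1 - 4| + |1 - 5| = 3 + 4 = 7.0, so:
#   manhattan_distance([1, 1], [4, 5]) == manhattan_distance_one_liner([1, 1], [4, 5]) == 7.0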
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    """
    Pipeline for class-conditional image generation with a Transformer backbone (DiT).
    """

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map label strings from ImageNet to their corresponding class ids."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
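# --- Usage sketch (not part of the original module) ---------------------------
# The checkpoint name "facebook/DiT-XL-2-256" is an assumption for illustration.
#
# import torch
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
# class_ids = pipe.get_label_ids(["white shark", "umbrella"])
# generator = torch.manual_seed(33)
# images = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images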
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Stand-in for a real CUDA out-of-memory failure."""
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
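# --- A minimal usage sketch for `find_executable_batch_size` (illustrative
# only; `_fake_train_step` and the starting size of 64 are hypothetical).
# The decorator injects `batch_size` as the first argument and halves it
# each time the wrapped function raises an out-of-memory error.
if __name__ == "__main__":

    def _fake_train_step(batch_size):
        # hypothetical workload; a real loop would build a dataloader here
        return batch_size

    @find_executable_batch_size(starting_batch_size=64)
    def train(batch_size):
        return _fake_train_step(batch_size)

    print(train())  # called with no arguments; the decorator supplies batch_size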
| 24
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print under an exclusive lock on this file so that output from
    concurrent processes does not interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 226
| 0
|
"""
Circle sort: compare and swap elements mirrored across the middle of the
range, recurse on both halves, and repeat until a full pass makes no swaps.

>>> circle_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> circle_sort([])
[]
"""


def circle_sort(collection: list) -> list:
    """Sort the given mutable collection in place and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One circular pass over collection[low:high + 1]; returns True if
        any pair was swapped."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 91
|
"""Tests for the Shap-E text-to-3D pipeline."""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy")
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 1
|
""" LayoutLMv3 model configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework, ))

        return inputs
| 22
|
"""
Project Euler 57 (https://projecteuler.net/problem=57): in the first 1000
continued-fraction expansions of sqrt(2), count those whose numerator has
more digits than the denominator.
"""


def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
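# Why the recurrence works: with sqrt(2) = 1 + 1/(2 + 1/(2 + ...)), if p/q
# is one expansion then the next is 1 + 1/(1 + p/q) = (p + 2q)/(p + q) --
# exactly the numerator/denominator update performed in `solution`. A quick
# exact-arithmetic check of the first expansions (3/2, 7/5, 17/12, 41/29):
if __name__ == "__main__":
    from fractions import Fraction

    expansion = Fraction(3, 2)
    for _ in range(3):
        expansion = 1 + 1 / (1 + expansion)
    assert expansion == Fraction(41, 29)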
| 304
| 0
|
"""Truncate every file in a directory to its first n lines (useful for
building miniature copies of large datasets)."""
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
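# Example invocations via python-fire (the paths here are hypothetical):
#
#   python minify.py ./wmt_en_ro ./wmt_en_ro_mini 128
#   python minify.py --src_dir=./wmt_en_ro --dest_dir=./wmt_en_ro_mini --n=128
#
# Each file in src_dir is copied to dest_dir, truncated to its first n lines.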
| 361
|
"""
Project Euler 89 (https://projecteuler.net/problem=89): for each Roman
numeral in the data file, compute how many characters are saved by
rewriting it in minimal form.
"""
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) Roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert an integer to its minimal Roman numeral representation."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __lowercase ( __lowercase = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_A = 0
with open(os.path.dirname(__lowercase ) + roman_numerals_filename ) as filea:
_A = filea.readlines()
for line in lines:
_A = line.strip()
_A = parse_roman_numerals(__lowercase )
_A = generate_roman_numerals(__lowercase )
savings += len(__lowercase ) - len(__lowercase )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174
| 0
|
from __future__ import annotations
class Node:
    """A binary tree node holding an integer payload."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """A tree is full when every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
| 43
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first post-order pass over the original graph."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search over the reversed graph, collecting one component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: one DFS pass for the finish order, then one
    over the reversed graph to peel off strongly connected components."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
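if __name__ == "__main__":
    # Minimal driver for the functions above; reading the graphs by hand,
    # test_graph_1 decomposes into {0, 1, 2}, {3}, {4} and test_graph_2
    # into {0, 1, 2} and {3, 4, 5}.
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))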
| 62
| 0
|
"""Tests for the CLIP processor (tokenizer + image processor)."""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a single fake PIL image from random uint8 pixel data."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
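# A minimal end-to-end sketch of the API exercised above, kept as a comment
# because it relies on the files serialized in `setUp` (names illustrative):
#
#   processor = CLIPProcessor.from_pretrained(tmpdirname)
#   inputs = processor(text=["lower newer"], images=image, return_tensors="pt")
#   # -> a BatchEncoding with "input_ids", "attention_mask" and "pixel_values"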
| 356
|
"""Flax implementation of a conditional 2D UNet."""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.')

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
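# A minimal smoke-test sketch for the module above, using a deliberately
# tiny, hypothetical configuration (two blocks; channel widths stay >= 32
# because the output GroupNorm uses 32 groups) so init_weights runs quickly
# on CPU. This is illustrative, not a reference configuration.
if __name__ == "__main__":
    model = FlaxUNet2DConditionModel(
        sample_size=8,
        block_out_channels=(32, 64),
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
        attention_head_dim=4,
        cross_attention_dim=32,
    )
    params = model.init_weights(jax.random.PRNGKey(0))
    print(jax.tree_util.tree_map(lambda p: p.shape, params))  # parameter shapes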
| 96
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n (trial division)."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if every element of the list is equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers that each have exactly
    n distinct prime factors."""
    base = 2

    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    """Project Euler 47: the first of n consecutive integers having n
    distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
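# Known smaller cases for a quick sanity check: the first pair of consecutive
# integers with two distinct prime factors each starts at 14 (14 = 2 * 7,
# 15 = 3 * 5), and the first such triple starts at 644 (644 = 2^2 * 7 * 23).
if __name__ == "__main__":
    assert solution(2) == 14
    assert solution(3) == 644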
| 302
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config


def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 141
|
import requests
giphy_api_key = "YOUR API KEY"
# Get an API key from https://developers.giphy.com/


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of gif URLs for the given search query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 141
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 25
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_A : Optional[int] = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
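# A minimal usage sketch (hypothetical, for illustration): constructing the shim
# emits the FutureWarning above while still behaving like SegformerImageProcessor.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         extractor = SegformerFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)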
| 229
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
        'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ResNetForImageClassification',
        'ResNetModel',
        'ResNetPreTrainedModel',
        'ResNetBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
        'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFResNetForImageClassification',
        'TFResNetModel',
        'TFResNetPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
        'FlaxResNetForImageClassification',
        'FlaxResNetModel',
        'FlaxResNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
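# Rough sketch of what the lazy-module pattern above buys us (the import line is
# illustrative): importing the package is cheap because the heavy modeling
# submodules are only materialized on first attribute access, e.g.
#
#     from transformers.models.resnet import ResNetConfig  # triggers the real import lazily
#
# rather than pulling in the torch/TF/Flax modeling code eagerly at import time.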
| 270
|
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
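    # A couple of quick sanity checks for the function above (the example
    # strings are arbitrary, chosen for illustration):
    assert longest_common_substring("abcdef", "xabded") == "ab"
    assert longest_common_substring("programming", "gaming") == "ming"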
| 270
| 1
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)

        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 303
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
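# Usage sketch via torch.hub (this module follows the torch.hub `hubconf.py`
# convention; the checkpoint name below is an arbitrary example):
#
#     import torch
#     tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")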
| 303
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: ordinary least squares on [1, date, match] features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # OLS: beta = (X^T X)^-1 X^T y
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    # beta[2] multiplies the match feature of the design matrix above
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: seasonal ARIMA with the match count as exogenous input."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: RBF-kernel support vector regression."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Returns a lower limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: is the actual value consistent with the forecasts?"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        elif abs(abs(i) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    # start normalization
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data (the actual value is the held-out last user count)
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
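    # Quick illustration of the voting check above (the numbers are arbitrary,
    # chosen to exercise both branches):
    assert data_safety_checker([0.5, 0.45, 0.56], 0.5)  # two of three votes within 0.1
    assert not data_safety_checker([0.9, 0.95, 1.0], 0.5)  # every forecast overshoots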
| 367
|
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    # sa = sin(2*theta), ca = cos(2*theta), where theta = atan(normal_gradient)
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
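# Sketch of the identity used in next_point(): if a line has gradient g = tan(theta),
# then sa = 2g / (1 + g^2) = sin(2*theta) and ca = (1 - g^2) / (1 + g^2) = cos(2*theta),
# so reflecting an incoming gradient m across that line gives
# (sa - ca*m) / (ca + sa*m) = tan(2*theta - atan(m)).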
| 279
| 0
|
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking helper: try every knight move, undoing moves that fail."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
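    # Minimal usage check (a 1x1 board is trivially solvable; larger boards can
    # take exponential time with this plain backtracking search):
    assert open_knight_tour(1) == [[1]]
    # open_knight_tour(2) raises ValueError: no tour exists on a 2x2 board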
| 11
|
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 321
| 0
|
"""simple docstring"""
def solution(limit: int = 28123) -> int:
    """
    Sums all the positive integers that cannot be written as the sum of two
    abundant numbers (Project Euler problem 23).
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
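# Context for the default limit (well-known Project Euler facts): 12 is the
# smallest abundant number, so 24 is the smallest sum of two abundant numbers,
# and every integer greater than 28123 can be written as such a sum.
#
#     # solution() == 4179871  (the published answer to Project Euler problem 23)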
| 358
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
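# Usage sketch (the agent name is an arbitrary example; `None` falls back to the
# default prompts repo defined above):
#
#     template = download_prompt(None, agent_name="my-agent", mode="run")
#     # a literal prompt (it contains whitespace) is returned unchanged:
#     assert download_prompt("Answer: <<task>>", "my-agent") == "Answer: <<task>>"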
| 202
| 0
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
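# Example invocation (the script file name and all paths are placeholders):
#
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path /path/to/model.pkl \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin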
| 197
|
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'

    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
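# Example runs (the script name is assumed for illustration):
#
#     python check_build.py                # checks build/lib/transformers
#     python check_build.py --check_lib    # checks the installed package instead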
| 124
| 0
|
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
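# Example invocation (the script file name and all paths are placeholders):
#
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./out \
#         --transfo_xl_dataset_file ./corpus-cached.pkl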
| 18
|
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS traversal that returns the size of the subtree rooted at ``start``
    and records every vertex whose subtree has even size."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
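# Why len(cuts) - 1: dfs(1) records every vertex whose subtree has even size,
# including the root (the whole 10-node tree), but the root has no parent edge
# to remove, hence the subtraction. For the sample edges above this prints 2
# (removing edges (1, 3) and (1, 6) leaves only even-sized components).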
| 18
| 1
|
def bead_sort(sequence: list) -> list:
    """Sorts a list of non-negative integers with the bead (gravity) sort idea."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
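    # Note: this list-based simulation makes len(sequence) passes that only move
    # beads between adjacent rods, so it runs in O(n^2) time.
    assert bead_sort([0, 3, 0, 2]) == [0, 0, 2, 3]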
| 107
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
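# A small usage sketch (the values checked are the defaults defined above):
#
#     config = AlbertConfig()
#     assert config.hidden_size == 4096 and config.num_attention_heads == 64
#     onnx_config = AlbertOnnxConfig(config)
#     assert "input_ids" in onnx_config.inputs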
| 107
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 350
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    """Create an IAM role (with an attached policy) that SageMaker training jobs can assume."""
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    """Look up the ARN of an existing IAM role."""
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    """Walk the user through the questions needed to build a `SageMakerConfig`."""
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
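# --- Added note (hedged, not part of the original module) ---------------------
# `get_sagemaker_input` backs the interactive `accelerate config` flow for the
# Amazon SageMaker compute environment. A minimal non-interactive equivalent,
# using only fields that appear in the return statement above, might look like:
#
#   config = SageMakerConfig(
#       compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
#       distributed_type=SageMakerDistributedType.NO,
#       ec2_instance_type="ml.p3.2xlarge",
#       iam_role_name="accelerate_sagemaker_execution_role",
#       profile="default",
#       region="us-east-1",
#       mixed_precision="no",
#       num_machines=1,
#   )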
| 203
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
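# --- Added usage sketch (an assumption, not from this file) -------------------
# A scaled-down config for quick tests; every keyword mirrors a constructor
# argument defined above.
#
#   tiny_config = NllbMoeConfig(
#       d_model=64, encoder_layers=2, decoder_layers=2,
#       encoder_ffn_dim=128, decoder_ffn_dim=128, num_experts=4,
#   )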
| 245
| 0
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Shift the first positional input by one before the wrapped forward runs.
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        # Shift the output by one after the wrapped forward runs.
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 132
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # Insert at the head of the list.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap the payloads rather than relinking the nodes.
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 132
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 76
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
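# --- Added usage sketch (hedged, not part of the original module) -------------
# `WhisperModel` is assumed to live alongside this config in transformers.
#
#   from transformers import WhisperConfig, WhisperModel
#
#   config = WhisperConfig()       # defaults encoded above
#   model = WhisperModel(config)   # randomly initialised weights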
| 99
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
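# --- Added note (explanatory, not original code) ------------------------------
# With the `_LazyModule` pattern above, the torch-backed submodule is only
# imported when one of its attributes is first accessed, e.g. (assuming the
# usual transformers package layout):
#
#   from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#   # the modeling code loads lazily, and only if torch is installed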
| 91
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary of string Values."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(self.languages)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary of Sequences."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
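# --- Added usage sketch (illustrative values, not from this file) -------------
#
#   feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"),
#   #     "translation": ("the cat", "le chat", "la chatte")}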
| 91
| 1
|
'''simple docstring'''
import requests
__lowerCAmelCase = """""" # <-- Put your OpenWeatherMap appid here!
__lowerCAmelCase = """https://api.openweathermap.org/data/2.5/"""
def UpperCAmelCase_ (__a : str = "Chicago" , __a : str = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def UpperCAmelCase_ (__a : str = "Kolkata, India" , __a : str = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def UpperCAmelCase_ (__a : float = 55.68 , __a : float = 12.57 , __a : str = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__lowerCAmelCase = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 271
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Create train/validation/test `DataLoader`s for one cross-validation fold of GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
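# --- Added example invocations (assumed script name, matching the header comments)
#
#   python cross_validation.py --num_folds 5              # single CPU or GPU
#   accelerate launch cross_validation.py --num_folds 5   # distributed settings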
| 271
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_informer': [
        'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_informer'] = [
        'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'InformerForPrediction',
        'InformerModel',
        'InformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 366
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = 'perceiver'

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention='kv',
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act='gelu',
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('input_ids')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == 'pixel_values':
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('pixel_values')
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.'
            )
| 337
| 0
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Configuration for dataset downloads. The field names were reconstructed
    from the `datasets.DownloadConfig` layout that this dataclass matches."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
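# --- Added usage sketch (hedged): assuming the `datasets.DownloadConfig`
# semantics that the field layout above suggests:
#
#   from datasets import load_dataset
#
#   dl_config = DownloadConfig(max_retries=3, resume_download=True)
#   ds = load_dataset("squad", download_config=dl_config)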
| 49
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad/truncate a batch of variable-length sequences to `sequence_length`.

    Note: the slice-assignment targets below are reconstructed; the mangled
    source only preserved the right-hand sides of these statements.
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        truncated = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(truncated)] = truncated
        else:
            out_tensor[i, -len(truncated) :] = truncated

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
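# --- Added usage sketch (illustrative, not from this file) --------------------
#
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer, padding="longest")
#   batch = collator([dataset[i] for i in range(8)])
#   # pads `labels`, `ner_tags` and `original_entity_spans` to the entity sequence length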
| 49
| 1
|
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(123)
    False
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] ,)
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase__ : Any = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase__,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
| 94
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 291
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
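# Example invocation (illustrative; the script and argument names are placeholders):
#
#   python xla_spawn.py --num_cores 8 your_training_script.py --your_args ...
#
# Everything after the training-script path is forwarded to it, and main() injects
# an extra `--tpu_num_cores` flag before spawning one process per core.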
| 291
| 1
|
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 286
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
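    # Example invocation (illustrative; paths are placeholders):
    #
    #   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./mobilebert/ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./pytorch_model.bin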
| 286
| 1
|
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Returns the truthiness of the environment variable `key`, or `default` if unset."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
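# Usage sketch (illustrative; the environment variable names are examples, not a
# guarantee of what callers actually read):
#
#   world_size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE"], 1)
#   debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE", default=False)
#   backend = parse_choice_from_env("ACCELERATE_DYNAMO_BACKEND", default="no")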
| 18
|
def binary_insertion_sort(collection: list) -> list:
    """Sorts a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 18
| 1
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Returns the peak of a list that first increases and then decreases, in O(log n).

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6, 5, 4])
    10
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 94
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
_snake_case = {"""facebook/blenderbot-3B""": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding the
    whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
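# Illustrative behaviour of the two helpers above (comment-only sketch):
#
#   byte_encoder = bytes_to_unicode()
#   byte_encoder[ord("A")]  -> "A"   (printable bytes map to themselves)
#   byte_encoder[0]         -> "Ā"   (control bytes are shifted to 256 + n)
#
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}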
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
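# Usage sketch (illustrative):
#
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello world!").input_ids
#   # a single </s> is appended to the sequence, see build_inputs_with_special_tokens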
| 250
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
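# Usage sketch (illustrative):
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   enc = tokenizer("Bonjour le monde")  # <s> ... </s> added around the text
#   tokenizer.convert_ids_to_tokens(enc.input_ids)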
| 81
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
| 363
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, GPT2Tokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 344
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""OwlViTFeatureExtractor"""]
lowerCamelCase_ : List[Any] = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333
| 0
|
import torch
from diffusers import StableDiffusionPipeline
__snake_case = """path-to-your-trained-model"""
__snake_case = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
__snake_case = """A photo of sks dog in a bucket"""
__snake_case = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 169
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def A_ ( self : List[str] , UpperCAmelCase_ : Dict[str, torch.Tensor] ):
SCREAMING_SNAKE_CASE__ = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(UpperCAmelCase_ , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
SCREAMING_SNAKE_CASE__ = True
return readable_batch
def A_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
return self.model(UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Dict , UpperCAmelCase_ : List[int] ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.batch_decode(
UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return lmap(str.strip , UpperCAmelCase_ )
def A_ ( self : List[Any] , UpperCAmelCase_ : dict ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch['input_ids'], batch['attention_mask']
SCREAMING_SNAKE_CASE__ = batch['labels']
if isinstance(self.model , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = self.model._shift_right(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = shift_tokens_right(UpperCAmelCase_ , UpperCAmelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE__ = decoder_input_ids
self.save_readable_batch(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE__ = nn.CrossEntropyLoss(ignore_index=UpperCAmelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE__ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE__ = nn.functional.log_softmax(UpperCAmelCase_ , dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = label_smoothed_nll_loss(
UpperCAmelCase_ , UpperCAmelCase_ , self.hparams.label_smoothing , ignore_index=UpperCAmelCase_ )
return (loss,)
@property
def A_ ( self : Dict ):
return self.tokenizer.pad_token_id
def A_ ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = self._step(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dict(zip(self.loss_names , UpperCAmelCase_ ) )
# tokens per batch
SCREAMING_SNAKE_CASE__ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch['input_ids'].shape[0]
SCREAMING_SNAKE_CASE__ = batch['input_ids'].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def A_ ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ):
return self._generative_step(UpperCAmelCase_ )
def A_ ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple="val" ):
self.step_count += 1
SCREAMING_SNAKE_CASE__ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE__ = losses['loss']
SCREAMING_SNAKE_CASE__ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
SCREAMING_SNAKE_CASE__ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE__ = torch.tensor(UpperCAmelCase_ ).type_as(UpperCAmelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE__ = self.step_count
self.metrics[prefix].append(UpperCAmelCase_ ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE__ = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path):
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total target (output) sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total target (output) sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total target (output) sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="Task name: summarization or translation."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric, args.save_top_k, lower_is_better),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())

    args = parser.parse_args()

    main(args)
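# Example invocation (a sketch; the paths and model are placeholders, and most flags
# come from the shared lightning_base / add_generic_args helpers):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./out \
#       --model_name_or_path t5-small --do_train --do_predict --gpus 1 --task summarization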
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the requested image size up to the nearest latent-aligned size."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
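# Worked example: with the default scale_factor=8, a requested size of 768 gives
# 768 // 64 = 12 with no remainder, so the function returns 96 (the latent
# resolution that the 8x MoVQ decoder maps back to 768 pixels). A non-multiple
# such as 700 rounds up: 700 // 64 = 10 remainder 60 -> 11 * 8 = 88.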
class KandinskyV22Pipeline(DiffusionPipeline):
    """Decoder pipeline for text-to-image generation with Kandinsky 2.2."""

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
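    # The two offload strategies above trade speed for memory differently:
    # `cpu_offload` moves individual submodules to the GPU only for the duration of
    # their forward pass (lowest VRAM, slowest), while `cpu_offload_with_hook` keeps
    # a whole model on the GPU until the next model in the chain runs, which is much
    # faster and still bounds peak memory to roughly the largest single component.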
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from
        Accelerate's module hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate images from (prior-produced) image embeddings. See the example docstring above."""
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # Classifier-free guidance: eps = eps_uncond + s * (eps_text - eps_uncond).
                # The learned variance channels are not guided; only the text-conditional
                # variance is kept and re-attached for the scheduler.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase_ : int = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
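# Examples of the accepted input shapes: a single frame `img` becomes `[[img]]`,
# a list of frames `[img0, img1]` becomes `[[img0, img1]]` (one video), and a
# batch of videos `[[img0, ...], [img1, ...]]` is returned unchanged.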
class VivitImageProcessor(BaseImageProcessor):
    r"""Constructs a Vivit image processor."""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
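    # Minimal usage sketch (variable names are illustrative): for `video` given as a
    # list of PIL frames, `VivitImageProcessor()(video, return_tensors="np")` runs
    # resize -> center-crop -> rescale(+offset) -> normalize per frame and returns a
    # BatchFeature whose "pixel_values" has shape (batch, frames, channels, height, width).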
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
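    # The expected values above come from the original fairseq checkpoint (see the
    # commented torch.hub lines); atol=1e-3 absorbs the small numerical differences
    # between the fairseq and transformers implementations.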
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of `lst` (quickselect with a random pivot)."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
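# Example: kth_number([3, 1, 4, 2, 5], k=2) == 2 (the 2nd smallest element).
# Like quickselect, the expected running time is linear, with a quadratic worst
# case under unlucky pivots. Note that the partition keeps only one copy of the
# pivot, so the input is assumed to contain distinct elements.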
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        expected_encoding = _a  # readable alias for the encoding dict defined above
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2

            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
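# Illustrative key rewrites (hypothetical key names): "text_branch.layer.0.attn.q"
# first becomes "text_model.layer.0.attention.self.q" via KEYS_TO_MODIFY_MAPPING,
# "sequential.3.weight" becomes "layers.1.linear.weight" (3 // 3 == 1), and an
# audio "qkv" weight of shape (3 * dim, ...) is split row-wise into separate
# query / key / value tensors.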
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCAmelCase = "http://www.mocksite.com/file1.txt"
__UpperCAmelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCAmelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    r"""Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae, text_encoder, tokenizer, transformer, scheduler, learned_classifier_free_sampling_embeddings):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt"
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        text_embeddings = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=text_embeddings, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(model_output, timestep=t, sample=latents, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(latents, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
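    # Example: with truncation_rate=0.9 the per-pixel class probabilities are sorted in
    # descending order and kept until their cumulative sum reaches 0.9; every class past
    # that point has its log-probability set to -inf (= log 0), so sampling only draws
    # from the retained head of the distribution. The top-1 class is always kept.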
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
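# Example (hypothetical repo id): hf_hub_url("user/dataset", "data/train.csv")
# resolves to "https://huggingface.co/datasets/user/dataset/resolve/main/data/train.csv"
# (revision=None falls back to "main").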
| 350
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def a__ ( __lowercase , __lowercase , __lowercase = 1_6000 ) -> List[str]:
_A = int(round(sample_rate * max_length ) )
if len(__lowercase ) <= sample_length:
return wav
_A = randint(0 , len(__lowercase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
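# A self-contained sketch of the random-crop idea above on a fake waveform
# (the 16 kHz rate and 2 s cap are illustrative assumptions):
import numpy as np
from random import randint

demo_rate, demo_len = 16_000, int(round(16_000 * 2.0))
demo_wav = np.zeros(5 * demo_rate, dtype=np.float32)  # 5 s of silence
demo_off = randint(0, len(demo_wav) - demo_len - 1)
assert len(demo_wav[demo_off : demo_off + demo_len]) == demo_len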
@dataclass
class snake_case :
__UpperCamelCase = field(default=_UpperCamelCase , metadata={'help': 'Name of a dataset from the datasets package'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'A file containing the training audio paths and labels.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'A file containing the validation audio paths and labels.'})
__UpperCamelCase = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
__UpperCamelCase = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
__UpperCamelCase = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
__UpperCamelCase = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
__UpperCamelCase = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class snake_case :
__UpperCamelCase = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
__UpperCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
__UpperCamelCase = field(
default=_UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , a__ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def a__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , __lowercase , __lowercase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A = training_args.get_process_log_level()
logger.setLevel(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
_A = DatasetDict()
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--label_column_name` to the correct text column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_A = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_A = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_A = feature_extractor.model_input_names[0]
def train_transforms(__lowercase ):
_A = []
for audio in batch[data_args.audio_column_name]:
_A = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__lowercase )
_A = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(__lowercase )}
_A = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__lowercase ):
_A = [audio["array"] for audio in batch[data_args.audio_column_name]]
_A = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(__lowercase )}
_A = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_A = raw_datasets["train"].features[data_args.label_column_name].names
_A , _A = {}, {}
for i, label in enumerate(__lowercase ):
_A = str(__lowercase )
_A = label
# Load the accuracy metric from the datasets package
_A = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__lowercase ):
_A = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__lowercase , references=eval_pred.label_ids )
_A = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel=__lowercase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_A = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__lowercase , output_all_columns=__lowercase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_A = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__lowercase , output_all_columns=__lowercase )
# Initialize our trainer
_A = Trainer(
model=__lowercase , args=__lowercase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , )
# Training
if training_args.do_train:
_A = None
if training_args.resume_from_checkpoint is not None:
_A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A = last_checkpoint
_A = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_A = trainer.evaluate()
trainer.log_metrics("eval" , __lowercase )
trainer.save_metrics("eval" , __lowercase )
# Write model card and (optionally) push to hub
_A = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
main()
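# Illustrative launch of the script above (dataset name and flags are
# assumptions, mirroring the usual audio-classification example invocation):
# python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks \
#     --output_dir ./wav2vec2-ks --do_train --do_eval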
| 163
| 0
|
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ : Optional[Any] = GPTSwaTokenizer
lowercase_ : Any = False
lowercase_ : int = True
lowercase_ : Optional[int] = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A_ : Dict = GPTSwaTokenizer(snake_case_ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Dict = 'This is a test'
A_ : int = 'This is a test'
return input_text, output_text
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : str = '<s>'
A_ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(vocab_keys ) , 2_0_0_0 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : str = GPTSwaTokenizer(snake_case_ )
A_ : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
A_ : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
snake_case_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
A_ : Any = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(
snake_case_ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
A_ : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case_ )
# fmt: off
self.assertListEqual(
snake_case_ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = GPTSwaTokenizer(snake_case_ )
A_ : Union[str, Any] = ['This is a test', 'I was born in 92000, and this is falsé.']
A_ : int = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(snake_case_ , snake_case_ ):
self.assertListEqual(tokenizer.encode_fast(snake_case_ ) , snake_case_ )
# Test that decode_fast returns the input text
for text, token_ids in zip(snake_case_ , snake_case_ ):
self.assertEqual(tokenizer.decode_fast(snake_case_ ) , snake_case_ )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
A_ : Union[str, Any] = {'input_ids': [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=snake_case_ , )
| 286
|
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = R'\w+[.]\d+'
A_ : int = re.findall(_UpperCAmelCase , _UpperCAmelCase )
for pat in pats:
A_ : Optional[int] = key.replace(_UpperCAmelCase , '_'.join(pat.split('.' ) ) )
return key
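# Quick self-contained check of the `\w+[.]\d+` renaming above (the key is a
# made-up example):
demo_key = "down_blocks.0.attentions.1.proj"
for demo_pat in re.findall(r"\w+[.]\d+", demo_key):
    demo_key = demo_key.replace(demo_pat, "_".join(demo_pat.split(".")))
assert demo_key == "down_blocks_0.attentions_1.proj"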
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A_ : Union[str, Any] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A_ : List[str] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A_ : Optional[Any] = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A_ : int = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A_ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A_ : Optional[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A_ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A_ : Tuple = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A_ : Optional[int] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
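# Self-contained sketch of the two weight reshapes above on dummy arrays
# (the layer shapes are illustrative):
import numpy as np

demo_conv = np.zeros((8, 3, 3, 3))                # PyTorch conv weight: (out, in, kH, kW)
assert demo_conv.transpose(2, 3, 1, 0).shape == (3, 3, 3, 8)  # Flax kernel: (kH, kW, in, out)
demo_linear = np.zeros((16, 32))                  # PyTorch linear weight: (out, in)
assert demo_linear.T.shape == (32, 16)            # Flax dense kernel: (in, out)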
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=42 ):
"""simple docstring"""
A_ : int = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A_ : Union[str, Any] = flax_model.init_weights(PRNGKey(_UpperCAmelCase ) )
A_ : Optional[Any] = flatten_dict(_UpperCAmelCase )
A_ : Tuple = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A_ : Any = rename_key(_UpperCAmelCase )
A_ : List[str] = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A_ , A_ : Union[str, Any] = rename_key_and_reshape_tensor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
A_ : str = jnp.asarray(_UpperCAmelCase )
return unflatten_dict(_UpperCAmelCase )
| 286
| 1
|
'''simple docstring'''
import argparse
import os
import re
a_ : Optional[Any] = "src/transformers"
# Pattern that looks at the indentation in a line.
a_ : List[Any] = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
a_ : List[str] = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a_ : Dict = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
a_ : Optional[int] = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a_ : Tuple = re.compile(R"\[([^\]]+)\]")
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
_a = _re_indent.search(lowerCAmelCase__ )
return "" if search is None else search.groups()[0]
def _A (lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str="" , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Dict=None ) -> Dict:
'''simple docstring'''
_a = 0
_a = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase__ ):
index += 1
_a = ['\n'.join(lines[:index] )]
else:
_a = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_a = [lines[index]]
index += 1
while index < len(lowerCAmelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowerCAmelCase__ ) )
if index < len(lowerCAmelCase__ ) - 1:
_a = [lines[index + 1]]
index += 1
else:
_a = []
else:
blocks.append('\n'.join(lowerCAmelCase__ ) )
_a = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase__ ) > 0:
blocks.append('\n'.join(lowerCAmelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase__ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _A (lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
def _inner(lowerCAmelCase__ :List[Any] ):
return key(lowerCAmelCase__ ).lower().replace('_' , '' )
return _inner
def _A (lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
def noop(lowerCAmelCase__ :int ):
return lowerCAmelCase__
if key is None:
_a = noop
# Constants are all uppercase, they go first.
_a = [obj for obj in objects if key(lowerCAmelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_a = [obj for obj in objects if key(lowerCAmelCase__ )[0].isupper() and not key(lowerCAmelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
_a = [obj for obj in objects if not key(lowerCAmelCase__ )[0].isupper()]
_a = ignore_underscore(lowerCAmelCase__ )
return sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ )
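# Tiny demo of the ordering rule above -- constants, then classes, then
# functions, each group sorted with underscores ignored (names are made up):
demo_objs = ["beta_fn", "AlphaModel", "MAX_LEN", "_helper", "Zeta"]
demo_key = lambda s: s.lower().replace("_", "")
demo_consts = [o for o in demo_objs if o.isupper()]
demo_classes = [o for o in demo_objs if o[0].isupper() and not o.isupper()]
demo_funcs = [o for o in demo_objs if not o[0].isupper()]
assert (
    sorted(demo_consts, key=demo_key) + sorted(demo_classes, key=demo_key) + sorted(demo_funcs, key=demo_key)
    == ["MAX_LEN", "AlphaModel", "Zeta", "beta_fn", "_helper"]
)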
def _A (lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
def _replace(lowerCAmelCase__ :List[str] ):
_a = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_a = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_a = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(lowerCAmelCase__ )] ) + "]"
_a = import_statement.split('\n' )
if len(lowerCAmelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_a = 2 if lines[1].strip() == '[' else 1
_a = [(i, _re_strip_line.search(lowerCAmelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_a = sort_objects(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : lowerCAmelCase__[1] )
_a = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCAmelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_a = _re_bracket_content.sub(_replace , lines[1] )
else:
_a = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_a = keys[:-1]
_a = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(lowerCAmelCase__ )] )
return "\n".join(lowerCAmelCase__ )
else:
# Finally we have to deal with imports fitting on one line
_a = _re_bracket_content.sub(_replace , lowerCAmelCase__ )
return import_statement
def _A (lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any=True ) -> Dict:
'''simple docstring'''
with open(lowerCAmelCase__ , encoding='utf-8' ) as f:
_a = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_a = split_code_in_indented_blocks(
lowerCAmelCase__ , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCAmelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_a = main_blocks[block_idx]
_a = block.split('\n' )
# Get to the start of the imports.
_a = 0
while line_idx < len(lowerCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_a = len(lowerCAmelCase__ )
else:
line_idx += 1
if line_idx >= len(lowerCAmelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
_a = '\n'.join(block_lines[line_idx:-1] )
_a = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
_a = split_code_in_indented_blocks(lowerCAmelCase__ , indent_level=lowerCAmelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
_a = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_a = [(pattern.search(lowerCAmelCase__ ).groups()[0] if pattern.search(lowerCAmelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_a = [(i, key) for i, key in enumerate(lowerCAmelCase__ ) if key is not None]
_a = [x[0] for x in sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : lowerCAmelCase__[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_a = 0
_a = []
for i in range(len(lowerCAmelCase__ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_a = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCAmelCase__ )
count += 1
# And we put our main block back together with its first and last line.
_a = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCAmelCase__ ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(lowerCAmelCase__ ) )
def _A (lowerCAmelCase__ :List[Any]=True ) -> Tuple:
'''simple docstring'''
_a = []
for root, _, files in os.walk(lowerCAmelCase__ ):
if "__init__.py" in files:
_a = sort_imports(os.path.join(lowerCAmelCase__ , '__init__.py' ) , check_only=lowerCAmelCase__ )
if result:
_a = [os.path.join(lowerCAmelCase__ , '__init__.py' )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(f'Would overwrite {len(lowerCAmelCase__ )} files, run `make style`.' )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
a_ : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
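# Illustrative invocations (the script/file name is an assumption):
# python custom_init_isort.py              # rewrite mis-sorted __init__.py files in place
# python custom_init_isort.py --check_only # only report; raises if anything would change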
| 104
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class a ( unittest.TestCase ):
def __UpperCAmelCase ( self ) -> int:
_a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Dict:
_a = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> str:
_a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Any:
_a = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> int:
# pass variant but use the non-variant filenames
_a = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[str]:
_a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
# pass variant but use the non-variant filenames
_a = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
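# The rule all of these cases exercise, paraphrased (not the library's code):
# a component's `*.bin` (or `*.{variant}.bin`) weight file counts as covered only
# if a matching `.safetensors` sibling exists; any uncovered `.bin` makes the
# whole repo non-compatible.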
| 104
| 1
|
'''simple docstring'''
from collections import defaultdict
def __lowerCamelCase ( _lowercase , _lowercase ) -> bool:
UpperCAmelCase : int = first_str.lower().strip()
UpperCAmelCase : Tuple = second_str.lower().strip()
# Remove whitespace
UpperCAmelCase : List[Any] = first_str.replace(""" """ , """""" )
UpperCAmelCase : Optional[Any] = second_str.replace(""" """ , """""" )
# Strings of different lengths are not anagrams
if len(_lowercase ) != len(_lowercase ):
return False
# Default values for count should be 0
UpperCAmelCase : defaultdict[str, int] = defaultdict(_lowercase )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(_lowercase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
a : str = input("""Enter the first string """).strip()
a : Union[str, Any] = input("""Enter the second string """).strip()
a : Any = check_anagrams(input_a, input_b)
print(F'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
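# A self-contained re-implementation sketch of the counting idea above (the
# example strings are made up):
from collections import defaultdict

def demo_is_anagram(first: str, second: str) -> bool:
    first, second = first.lower().replace(" ", ""), second.lower().replace(" ", "")
    if len(first) != len(second):
        return False
    counts: defaultdict[str, int] = defaultdict(int)
    for x, y in zip(first, second):
        counts[x] += 1  # add for the first string
        counts[y] -= 1  # subtract for the second
    return all(c == 0 for c in counts.values())

assert demo_is_anagram("Silent", "Listen") and not demo_is_anagram("one", "two")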
| 265
|
'''simple docstring'''
def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(_lowercase , int(b / 2 ) ) * actual_power(_lowercase , int(b / 2 ) )
else:
return a * actual_power(_lowercase , int(b / 2 ) ) * actual_power(_lowercase , int(b / 2 ) )
def __lowerCamelCase ( _lowercase , _lowercase ) -> float:
if b < 0:
return 1 / actual_power(_lowercase , _lowercase )
return actual_power(_lowercase , _lowercase )
if __name__ == "__main__":
print(power(-2, -3))
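# Hand-checked values for the divide-and-conquer power above:
# power(2, 3)   ==  8
# power(-2, -3) == -0.125   (negative exponents go through 1 / actual_power)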
| 265
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_UpperCAmelCase : str = None
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : List[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Dict = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Any = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
_UpperCAmelCase : Dict = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[int] = ['''input_ids''', '''attention_mask''']
__SCREAMING_SNAKE_CASE : Tuple = NllbTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self , snake_case=None , snake_case=None , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=None , snake_case=None , snake_case=None , snake_case=False , **snake_case , ):
# Mask token behaves like a normal word, i.e. includes the space before it
snake_case_ = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
snake_case_ = legacy_behaviour
super().__init__(
vocab_file=snake_case , tokenizer_file=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , src_lang=snake_case , tgt_lang=snake_case , additional_special_tokens=snake_case , legacy_behaviour=snake_case , **snake_case , )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
snake_case_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
snake_case_ = {
lang_code: self.convert_tokens_to_ids(snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ = src_lang if src_lang is not None else 'eng_Latn'
snake_case_ = self.convert_tokens_to_ids(self._src_lang )
snake_case_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
return self._src_lang
@src_lang.setter
def a ( self , snake_case ):
snake_case_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case , snake_case = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case , snake_case = None ):
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case , snake_case , snake_case , snake_case , **snake_case ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
snake_case_ = src_lang
snake_case_ = self(snake_case , add_special_tokens=snake_case , return_tensors=snake_case , **snake_case )
snake_case_ = self.convert_tokens_to_ids(snake_case )
snake_case_ = tgt_lang_id
return inputs
def a ( self , snake_case , snake_case = "eng_Latn" , snake_case = None , snake_case = "fra_Latn" , **snake_case , ):
snake_case_ = src_lang
snake_case_ = tgt_lang
return super().prepare_seqaseq_batch(snake_case , snake_case , **snake_case )
def a ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case ):
snake_case_ = self.convert_tokens_to_ids(snake_case )
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case ):
snake_case_ = self.convert_tokens_to_ids(snake_case )
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
snake_case_ = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case , snake_case = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
snake_case_ = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
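# Illustrative usage sketch (checkpoint name and text are assumptions; shown as
# comments because it downloads weights):
# tok = NllbTokenizerFast.from_pretrained(
#     "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
# batch = tok("Hello world", return_tensors="pt")
# # under the non-legacy default, input_ids start with the eng_Latn code and end with </s>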
| 356
|
import requests
_UpperCAmelCase : Union[str, Any] = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 200
| 0
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCAmelCase = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def UpperCAmelCase_ (__a : Optional[int] ):
"""simple docstring"""
_a : List[str] = test_results.split(' ' )
_a : Any = 0
_a : Union[str, Any] = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_a : Any = expressions[-2] if '=' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__a ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
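# Self-contained check of the stats parsing above; the summary line is a made-up
# example in pytest's usual "N failed, M passed" format:
demo_stats = "== 2 failed, 130 passed in 124.12s =="
demo_parts = demo_stats.split(" ")
demo_failed = sum(int(demo_parts[i - 1]) for i, p in enumerate(demo_parts) if "failed" in p)
demo_passed = sum(int(demo_parts[i - 1]) for i, p in enumerate(demo_parts) if "passed" in p)
assert (demo_failed, demo_passed) == (2, 130)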
def UpperCAmelCase_ (__a : Any ):
"""simple docstring"""
_a : Optional[Any] = {}
_a : Tuple = None
_a : str = False
for line in failures_short_lines.split('\n' ):
if re.search(R'_ \[doctest\]' , __a ):
_a : Tuple = True
_a : str = line.split(' ' )[2]
elif in_error and not line.split(' ' )[0].isdigit():
_a : Dict = line
_a : List[Any] = False
return failures
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Tuple ,_a : str ,_a : Dict ):
'''simple docstring'''
_a : Tuple = title
_a : Tuple = doc_test_results['time_spent'].split(',' )[0]
_a : List[str] = doc_test_results['success']
_a : List[Any] = doc_test_results['failures']
_a : str = self.n_success + self.n_failures
# Failures and success of the modeling tests
_a : Union[str, Any] = doc_test_results
@property
def __lowercase ( self : str ):
'''simple docstring'''
_a : Any = [self._time_spent]
_a : Union[str, Any] = 0
for time in time_spent:
_a : Any = time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_a ) == 1:
_a : Optional[int] = [0, 0, time_parts[0]]
_a, _a, _a : Union[str, Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
_a, _a, _a : List[str] = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F"""{int(_a )}h{int(_a )}m{int(_a )}s"""
@property
def __lowercase ( self : Any ):
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __lowercase ( self : int ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Any = 40
_a : List[Any] = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(_a ,_a )}
_a : Any = ''
for category, failures in category_failures.items():
if len(_a ) == 0:
continue
if report != "":
report += "\n\n"
report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_a )
@staticmethod
def __lowercase ( ):
'''simple docstring'''
_a : Optional[int] = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(_a )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text='There was an issue running the tests.' ,blocks=_a ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
_a : Optional[int] = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else 'All tests passed.'
_a : Union[str, Any] = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,blocks=self.payload ,text=_a ,)
def __lowercase ( self : Dict ,_a : List[Any] ,_a : Any ,_a : List[Any] ,_a : Tuple ):
'''simple docstring'''
_a : List[Any] = ''
for key, value in failures.items():
_a : Dict = value[:200] + ' [Truncated]' if len(value ) > 250 else value
failures_text += F"""*{key}*\n_{value}_\n\n"""
_a : Optional[int] = job_name
_a : Optional[Any] = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
if job_link is not None:
_a : str = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def __lowercase ( self : Any ):
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
_a : int = self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
_a : Tuple = sorted(self.doc_test_results.items() ,key=lambda _a : _a[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
_a : Optional[Any] = F"""*Num failures* :{len(job_result['failed'] )} \n"""
_a : str = job_result['failures']
_a : str = self.get_reply_blocks(_a ,_a ,_a ,text=_a )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text=F"""Results for {job}""" ,blocks=_a ,thread_ts=self.thread_ts['ts'] ,)
time.sleep(1 )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = os.environ['GITHUB_RUN_ID']
_a : List[Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_a : Union[str, Any] = requests.get(__a ).json()
_a : Union[str, Any] = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
_a : Tuple = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 )
for i in range(__a ):
_a : Optional[Any] = requests.get(url + f"""&page={i + 2}""" ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , __a )
return {}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Optional[int] = {}
if os.path.exists(__a ):
_a : List[Any] = os.listdir(__a )
for file in files:
try:
with open(os.path.join(__a , __a ) , encoding='utf-8' ) as f:
_a : int = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"""Could not open {os.path.join(__a , __a )}.""" ) from e
return _artifact
def UpperCAmelCase_ ():
"""simple docstring"""
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : str ):
'''simple docstring'''
_a : Union[str, Any] = name
_a : str = []
def __str__( self : Tuple ):
'''simple docstring'''
return self.name
def __lowercase ( self : Dict ,_a : str ):
'''simple docstring'''
self.paths.append({'name': self.name, 'path': path} )
_a : Dict[str, Artifact] = {}
_a : Any = filter(os.path.isdir , os.listdir() )
for directory in directories:
_a : Dict = directory
if artifact_name not in _available_artifacts:
_a : int = Artifact(__a )
_available_artifacts[artifact_name].add_path(__a )
return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("""*.py""", """API Examples"""),
            ("""*.md""", """MD Examples"""),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            """failed""": [],
            """failures""": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["""github_link"""] = github_actions_job_links.get("""run_doctests""")

    artifact_path = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
    artifact = retrieve_artifact(artifact_path["""name"""])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["""stats"""])
        doc_test_results["""failures"""] = failed
        doc_test_results["""success"""] = success
        doc_test_results["""time_spent"""] = time_spent[1:-1] + """, """

        all_failures = extract_first_line_failure(artifact["""failures_short"""])
        for line in artifact["summary_short"].split("""\n"""):
            if re.search("""FAILED""", line):
                line = line.replace("""FAILED """, """""")
                line = line.split()[0].replace("""\n""", """""")

                if "::" in line:
                    file_path, test = line.split("""::""")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else """N/A"""
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("""🤗 Results of the doc tests.""", doc_test_results)
    message.post()
    message.post_reply()
| 271
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    @property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
        return model

    @property
    def dummy_vq_model( self ):
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=3 ,)
        return model

    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(config )
    def test_inference_uncond( self ):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )

        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' ).images

        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' , return_dict=False )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
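        # The 3x3 corner slice above is a cheap fingerprint of the generated image; the
        # looser 3e-2 tolerance on 'mps' allows Apple-silicon numerics to drift slightly
        # from the CUDA/CPU reference values.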
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_uncond( self ):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )

        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type='numpy' ).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 271
| 1
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]

        result = model(inputs )
        result = model(input_ids )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForMaskedLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFDebertaVaModel,
            '''fill-mask''': TFDebertaVaForMaskedLM,
            '''question-answering''': TFDebertaVaForQuestionAnswering,
            '''text-classification''': TFDebertaVaForSequenceClassification,
            '''token-classification''': TFDebertaVaForTokenClassification,
            '''zero-shot''': TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
        self.assertIsNotNone(model )
@require_tf
class TFDebertaVaModelIntegrationTest( unittest.TestCase ):
    @unittest.skip(reason='Model not available yet' )
    def test_inference_masked_lm( self ):
        pass

    @slow
    def test_inference_no_head( self ):
        model = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]

        expected_slice = tf.constant(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 )
| 348
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class Mask2FormerConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = '''mask2former'''
    backbones_supported = ['''swin''']
    attribute_map = {'''hidden_size''': '''hidden_dim'''}

    def __init__( self , backbone_config : Optional[Dict] = None , feature_size : int = 256 , mask_feature_size : int = 256 , hidden_dim : int = 256 , encoder_feedforward_dim : int = 1_024 , activation_function : str = "relu" , encoder_layers : int = 6 , decoder_layers : int = 10 , num_attention_heads : int = 8 , dropout : float = 0.0 , dim_feedforward : int = 2_048 , pre_norm : bool = False , enforce_input_projection : bool = False , common_stride : int = 4 , ignore_value : int = 255 , num_queries : int = 100 , no_object_weight : float = 0.1 , class_weight : float = 2.0 , mask_weight : float = 5.0 , dice_weight : float = 5.0 , train_num_points : int = 12_544 , oversample_ratio : float = 3.0 , importance_sample_ratio : float = 0.75 , init_std : float = 0.02 , init_xavier_std : float = 1.0 , use_auxiliary_loss : bool = True , feature_strides : List[int] = [4, 8, 16, 32] , output_auxiliary_logits : bool = None , **kwargs , ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )

        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
                f"""Supported model types: {','.join(self.backbones_supported )}""" )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs )

    @classmethod
    def from_backbone_config( cls , backbone_config : PretrainedConfig , **kwargs ):
        """Instantiate a config from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict( self ) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
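# A minimal usage sketch for the class above (values illustrative, not from this file):
#
#   from transformers import SwinConfig
#   backbone = SwinConfig(depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24])
#   config = Mask2FormerConfig.from_backbone_config(backbone)
#   assert config.to_dict()["backbone_config"]["model_type"] == "swin"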
| 348
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train/validation `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
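# Hypothetical quick check of the helper above (not executed by this script):
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
#   batch = next(iter(train_dl))  # dict of input_ids / attention_mask / labels tensors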
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
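# Gradient-accumulation arithmetic used above (illustrative numbers): a requested
# batch_size of 64 with MAX_GPU_BATCH_SIZE == 16 yields gradient_accumulation_steps
# == 4 micro-batches of 16; dividing the loss by 4 before accelerator.backward keeps
# the accumulated gradient equivalent to a single 64-sample step.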
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 169
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_lowerCAmelCase : int = get_logger(__name__)
_lowerCAmelCase : Any = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _UpperCamelCase :
@add_start_docstrings(lowerCamelCase )
def __call__( self :Tuple , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _UpperCamelCase :
@add_start_docstrings(lowerCamelCase )
def __call__( self :Union[str, Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _UpperCamelCase ( lowerCAmelCase ):
@add_start_docstrings(lowerCamelCase )
def __call__( self :List[Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int , **lowerCamelCase :str ) -> jnp.ndarray:
for processor in self:
UpperCAmelCase__ = inspect.signature(processor.__call__ ).parameters
if len(lowerCamelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
UpperCAmelCase__ = processor(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
else:
UpperCAmelCase__ = processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :str , lowerCamelCase :float ) -> Tuple:
if not isinstance(lowerCamelCase , lowerCamelCase ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
UpperCAmelCase__ = temperature
def __call__( self :int , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = scores / self.temperature
return scores
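# Worked effect of the division above: with logits [2.0, 1.0], a temperature of 2.0
# rescales them to [1.0, 0.5], flattening the softmax from roughly [0.73, 0.27] to
# [0.62, 0.38]; temperatures below 1.0 sharpen the distribution instead.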
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[int] , lowerCamelCase :float , lowerCamelCase :float = -float("Inf" ) , lowerCamelCase :int = 1 ) -> Union[str, Any]:
if not isinstance(lowerCamelCase , lowerCamelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(lowerCamelCase , lowerCamelCase ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCAmelCase__ = top_p
UpperCAmelCase__ = filter_value
UpperCAmelCase__ = min_tokens_to_keep
def __call__( self :Tuple , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ , UpperCAmelCase__ = lax.top_k(lowerCamelCase , scores.shape[-1] )
UpperCAmelCase__ = jnp.full_like(lowerCamelCase , self.filter_value )
UpperCAmelCase__ = jax.nn.softmax(lowerCamelCase , axis=-1 ).cumsum(axis=-1 )
UpperCAmelCase__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCAmelCase__ = jnp.roll(lowerCamelCase , 1 )
score_mask |= score_mask.at[:, 0].set(lowerCamelCase )
# min tokens to keep
UpperCAmelCase__ = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCamelCase )
UpperCAmelCase__ = jnp.where(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jax.lax.sort_key_val(lowerCamelCase , lowerCamelCase )[-1]
return next_scores
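# How the class above realizes nucleus (top-p) filtering: lax.top_k over the full
# vocabulary sorts the scores descending, a cumulative-softmax mask keeps the
# smallest prefix whose probability mass stays below top_p (rolled by one so the
# token that crosses the threshold survives too), and sort_key_val scatters the
# filtered values back to their original vocabulary positions.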
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Union[str, Any] , lowerCamelCase :int , lowerCamelCase :float = -float("Inf" ) , lowerCamelCase :int = 1 ) -> List[str]:
if not isinstance(lowerCamelCase , lowerCamelCase ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCAmelCase__ = max(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = filter_value
def __call__( self :Optional[int] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ , UpperCAmelCase__ = scores.shape
UpperCAmelCase__ = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCAmelCase__ = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCAmelCase__ , UpperCAmelCase__ = lax.top_k(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.broadcast_to((jnp.arange(lowerCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCAmelCase__ = topk_scores.flatten()
UpperCAmelCase__ = topk_indices.flatten() + shift
UpperCAmelCase__ = next_scores_flat.at[topk_indices_flat].set(lowerCamelCase )
UpperCAmelCase__ = next_scores_flat.reshape(lowerCamelCase , lowerCamelCase )
return next_scores
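# The flattened-scatter trick above: with the batch collapsed to one axis, each
# row's top-k indices are shifted by row_index * vocab_size before the
# .at[...].set(...) update, so all kept scores are written in a single scatter
# instead of a per-row Python loop.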
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Any , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = bos_token_id
def __call__( self :Optional[int] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = jnp.full(scores.shape , -float("inf" ) )
UpperCAmelCase__ = 1 - jnp.bool_(cur_len - 1 )
UpperCAmelCase__ = jnp.where(lowerCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Tuple , lowerCamelCase :int , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = max_length
UpperCAmelCase__ = eos_token_id
def __call__( self :Union[str, Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = jnp.full(scores.shape , -float("inf" ) )
UpperCAmelCase__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCAmelCase__ = jnp.where(lowerCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[Any] , lowerCamelCase :int , lowerCamelCase :int ) -> Tuple:
if not isinstance(lowerCamelCase , lowerCamelCase ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(lowerCamelCase , lowerCamelCase ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCAmelCase__ = min_length
UpperCAmelCase__ = eos_token_id
def __call__( self :int , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
UpperCAmelCase__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCAmelCase__ = jnp.where(lowerCamelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , lowerCamelCase )
return scores
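# Concrete behaviour of the clip above (say min_length=5, eos_token_id=2): while
# cur_len <= 5, jnp.clip(cur_len - 5, 0, 1) is 0, so the EOS column is forced to
# -inf; once cur_len reaches 6 the clip saturates to 1 and the scores pass through.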
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :int , lowerCamelCase :List[str] , lowerCamelCase :str ) -> Any:
UpperCAmelCase__ = list(lowerCamelCase )
UpperCAmelCase__ = begin_index
def __call__( self :Union[str, Any] , lowerCamelCase :Union[str, Any] , lowerCamelCase :List[str] , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCAmelCase__ = jnp.where(lowerCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :List[Any] , lowerCamelCase :list ) -> Tuple:
UpperCAmelCase__ = list(lowerCamelCase )
def __call__( self :Optional[Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :List[Any] , lowerCamelCase :List[str] ) -> Union[str, Any]:
UpperCAmelCase__ = dict(lowerCamelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        UpperCAmelCase__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCAmelCase__ = force_token_array.at[index].set(lowerCamelCase )
        UpperCAmelCase__ = jnp.int32(lowerCamelCase )
def __call__( self :Optional[int] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
def _force_token(lowerCamelCase :str ):
UpperCAmelCase__ = scores.shape[0]
UpperCAmelCase__ = self.force_token_array[generation_idx]
UpperCAmelCase__ = jnp.ones_like(lowerCamelCase , dtype=scores.dtype ) * -float("inf" )
UpperCAmelCase__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
UpperCAmelCase__ = lax.dynamic_update_slice(lowerCamelCase , lowerCamelCase , (0, current_token) )
return new_scores
UpperCAmelCase__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowerCamelCase ) , lambda: scores , ) , )
return scores
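# Shape note for the force-token table built in __init__ (token ids illustrative):
# force_token_map == {1: 50259, 2: 50359} yields force_token_array == [-1, 50259, 50359];
# at step cur_len the nested lax.cond either leaves the scores untouched (index out of
# range or -1) or rewrites the row so only the forced token keeps mass (0 vs -inf).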
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[Any] , lowerCamelCase :List[Any] , lowerCamelCase :Optional[int] , lowerCamelCase :Tuple ) -> Dict:
UpperCAmelCase__ = generate_config.eos_token_id
UpperCAmelCase__ = generate_config.no_timestamps_token_id
UpperCAmelCase__ = generate_config.no_timestamps_token_id + 1
UpperCAmelCase__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCamelCase , "max_initial_timestamp_index" ):
UpperCAmelCase__ = generate_config.max_initial_timestamp_index
else:
UpperCAmelCase__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCAmelCase__ = model_config.vocab_size
def __call__( self :List[str] , lowerCamelCase :str , lowerCamelCase :int , lowerCamelCase :Any ) -> Union[str, Any]:
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCAmelCase__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(lowerCamelCase :int , lowerCamelCase :Union[str, Any] ):
UpperCAmelCase__ = jnp.where((cur_len - self.begin_index) >= 1 , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowerCamelCase , )
UpperCAmelCase__ = jnp.where((cur_len - self.begin_index) < 2 , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowerCamelCase , lowerCamelCase , )
return jnp.where(
lowerCamelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , lowerCamelCase , )
UpperCAmelCase__ = jax.vmap(lowerCamelCase )(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(cur_len == self.begin_index , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowerCamelCase , )
UpperCAmelCase__ = self.timestamp_begin + self.max_initial_timestamp_index
UpperCAmelCase__ = jnp.where(
lowerCamelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , lowerCamelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCAmelCase__ = jax.nn.log_softmax(lowerCamelCase , axis=-1 )
def handle_cumulative_probs(lowerCamelCase :Optional[int] , lowerCamelCase :Optional[Any] ):
UpperCAmelCase__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCAmelCase__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , lowerCamelCase , )
UpperCAmelCase__ = jax.vmap(lowerCamelCase )(lowerCamelCase , lowerCamelCase )
return scores
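# The final vmap above enforces the Whisper sampling rule: when the total
# log-probability mass of the timestamp tokens (logsumexp over the timestamp range)
# exceeds the best single text token's log-probability, every text token is masked
# so that a timestamp must be sampled.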
| 169
| 1
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowercase : Tuple = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast( BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast( BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F"""There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts."""
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
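    # Resulting layout sketch for n_passages == 2 (ids schematic, not real):
    #   [CLS] question [SEP] title_1 [SEP] text_1 ...
    #   [CLS] question [SEP] title_2 [SEP] text_2 ...
    # One row per passage, the question duplicated, texts appended without special
    # tokens, and each row truncated to max_length when truncation is requested.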
    def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(input_ids )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
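    # Span-selection sketch: every (start, end) window up to max_answer_length is
    # scored as start_logit + end_logit, candidates are sorted by that sum, and any
    # span nested inside (or fully containing) an already-chosen span is skipped, so
    # the returned intervals never overlap by containment.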
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast( CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 91
|
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
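# The recurrence being accelerated here (a Project Euler digit-sum sequence, assuming
# the standard statement): a(n + 1) = a(n) + digitsum(a(n)), opening 1, 2, 4, 8, 16,
# 23, 28, 38, ... The memo caches, per digit position k, (diff, dn, k) "jumps" so that
# long stretches of terms can be skipped instead of computed one at a time.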
def next_term( a_i , k , i , n ):
'''simple docstring'''
UpperCAmelCase = sum(a_i[j] for j in range(A , len(A ) ) )
UpperCAmelCase = sum(a_i[j] * base[j] for j in range(min(len(A ) , A ) ) )
UpperCAmelCase , UpperCAmelCase = 0, 0
UpperCAmelCase = n - i
UpperCAmelCase = memo.get(A )
if sub_memo is not None:
UpperCAmelCase = sub_memo.get(A )
if jumps is not None and len(A ) > 0:
# find and make the largest jump without going over
UpperCAmelCase = -1
for _k in range(len(A ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
UpperCAmelCase = _k
break
if max_jump >= 0:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = jumps[max_jump]
# since the difference between jumps is cached, add c
UpperCAmelCase = diff + c
for j in range(min(A , len(A ) ) ):
UpperCAmelCase , UpperCAmelCase = divmod(A , 10 )
if new_c > 0:
add(A , A , A )
else:
UpperCAmelCase = []
else:
UpperCAmelCase = {c: []}
UpperCAmelCase = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
UpperCAmelCase , UpperCAmelCase = next_term(A , k - 1 , i + dn , A )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
UpperCAmelCase , UpperCAmelCase = compute(A , A , i + dn , A )
diff += _diff
dn += terms_jumped
UpperCAmelCase = sub_memo[c]
# keep jumps sorted by # of terms skipped
UpperCAmelCase = 0
while j < len(A ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(A , (diff, dn, k) )
return (diff, dn)
def compute( a_i , k , i , n ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(A ):
a_i.extend([0 for _ in range(k - len(A ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
UpperCAmelCase = i
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0, 0, 0
for j in range(len(A ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
UpperCAmelCase = ds_c + ds_b
diff += addend
UpperCAmelCase = 0
for j in range(A ):
UpperCAmelCase = a_i[j] + addend
UpperCAmelCase , UpperCAmelCase = divmod(A , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(A , A , A )
return diff, i - start_i
def add( digits , k , addend ):
'''simple docstring'''
for j in range(A , len(A ) ):
UpperCAmelCase = digits[j] + addend
if s >= 10:
UpperCAmelCase , UpperCAmelCase = divmod(A , 10 )
UpperCAmelCase = addend // 10 + quotient
else:
UpperCAmelCase = s
UpperCAmelCase = addend // 10
if addend == 0:
break
while addend > 0:
UpperCAmelCase , UpperCAmelCase = divmod(A , 10 )
digits.append(A )
def solution( n : int = 10**15 ) -> int:
'''simple docstring'''
UpperCAmelCase = [1]
UpperCAmelCase = 1
UpperCAmelCase = 0
while True:
UpperCAmelCase , UpperCAmelCase = next_term(A , 20 , i + dn , A )
dn += terms_jumped
if dn == n - i:
break
UpperCAmelCase = 0
for j in range(len(A ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 91
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
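# Editorial note: with this pattern `sys.modules[__name__]` is swapped for a
# `_LazyModule`, so a statement such as
# `from transformers.models.mgp_str import MgpstrConfig` only imports the
# matching submodule (and torch, for the modeling classes) on first attribute
# access rather than at package import time.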
| 7
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the number of set bits in an integer using Brian Kernighan's way.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    >>> get_set_bits_count_using_brian_kernighans_algorithm(58)
    4
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # clears the least significant set bit on every iteration
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the number of set bits in an integer using the modulo operator.
    >>> get_set_bits_count_using_modulo_operator(25)
    3
    >>> get_set_bits_count_using_modulo_operator(58)
    4
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """
    Benchmark code for comparing the two functions, with different int values.
    """

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
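# Editorial addition: a quick cross-check of both counters against Python's own
# binary representation; bin(number).count("1") is the usual one-liner baseline.
if __name__ == "__main__":
    for sample in (25, 37, 58, 0):
        expected = bin(sample).count("1")
        assert get_set_bits_count_using_brian_kernighans_algorithm(sample) == expected
        assert get_set_bits_count_using_modulo_operator(sample) == expected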
| 7
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 351
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
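# Editorial sketch (not part of the original script): how a generated key pair
# drives a textbook RSA round trip.  Real systems must pad messages (e.g. OAEP);
# this only illustrates the arithmetic, c = m^e mod n followed by m = c^d mod n.
def _demo_roundtrip() -> None:
    public_key, private_key = generate_key(1024)
    n, e = public_key
    _, d = private_key
    message = 42  # any integer 0 <= message < n
    ciphertext = pow(message, e, n)
    assert pow(ciphertext, d, n) == message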
| 144
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 300
|
"""simple docstring"""
def join(separator: str, separated: list[str]) -> str:
    """
    Joins a list of strings using the given separator, e.g.
    join("-", ["apple", "banana", "cherry"]) -> "apple-banana-cherry".
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
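# Editorial note: unlike str.join, this helper appends the separator after every
# word and then strips it from both ends, so it would also remove separator
# characters that legitimately begin or end the result.
if __name__ == "__main__":
    assert join("-", ["apple", "banana", "cherry"]) == "apple-banana-cherry"
    assert "-".join(["apple", "banana", "cherry"]) == "apple-banana-cherry"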
| 45
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[str] = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class PixaStructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
def __init__( self : List[Any] , _snake_case : Optional[Any]=50244 , _snake_case : List[str]=768 , _snake_case : Tuple=64 , _snake_case : Tuple=2048 , _snake_case : Any=12 , _snake_case : int=12 , _snake_case : Dict=32 , _snake_case : Union[str, Any]=128 , _snake_case : Optional[int]=0.1 , _snake_case : str=1e-6 , _snake_case : int=1.0 , _snake_case : List[Any]="gelu_new" , _snake_case : Optional[int]=0 , _snake_case : Union[str, Any]=False , _snake_case : int=0 , _snake_case : Optional[int]=1 , _snake_case : int=False , _snake_case : Any=True , **_snake_case : str , ):
"""simple docstring"""
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = d_kv
UpperCAmelCase_ = d_ff
UpperCAmelCase_ = num_layers
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = relative_attention_num_buckets
UpperCAmelCase_ = relative_attention_max_distance
UpperCAmelCase_ = dropout_rate
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = initializer_factor
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase_ = dense_act_fn
super().__init__(
pad_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , tie_word_embeddings=_snake_case , is_decoder=_snake_case , **_snake_case , )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , _snake_case : Union[str, os.PathLike] , **_snake_case : Union[str, Any]):
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case)
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_snake_case , **_snake_case)
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''') == "pix2struct":
UpperCAmelCase_ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(_snake_case , **_snake_case)
class PixaStructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
def __init__( self : Any , _snake_case : List[str]=768 , _snake_case : str=768 , _snake_case : List[Any]=2048 , _snake_case : Optional[Any]=64 , _snake_case : Union[str, Any]=12 , _snake_case : Optional[int]=12 , _snake_case : List[str]="gelu_new" , _snake_case : Any=1e-6 , _snake_case : Dict=0.0 , _snake_case : Dict=0.0 , _snake_case : Optional[Any]=1e-10 , _snake_case : List[Any]=1.0 , _snake_case : int=4096 , _snake_case : List[str]=32 , _snake_case : List[str]=128 , **_snake_case : List[str] , ):
"""simple docstring"""
super().__init__(**_snake_case)
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = patch_embed_hidden_size
UpperCAmelCase_ = d_ff
UpperCAmelCase_ = dropout_rate
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = initializer_factor
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = dense_act_fn
UpperCAmelCase_ = seq_len
UpperCAmelCase_ = relative_attention_num_buckets
UpperCAmelCase_ = relative_attention_max_distance
UpperCAmelCase_ = d_kv
@classmethod
def lowerCamelCase ( cls : Tuple , _snake_case : Union[str, os.PathLike] , **_snake_case : str):
"""simple docstring"""
cls._set_token_in_kwargs(_snake_case)
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_snake_case , **_snake_case)
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''') == "pix2struct":
UpperCAmelCase_ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(_snake_case , **_snake_case)
class PixaStructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
def __init__( self : Any , _snake_case : Any=None , _snake_case : Union[str, Any]=None , _snake_case : Union[str, Any]=1.0 , _snake_case : List[str]=0.0_2 , _snake_case : str=False , _snake_case : str=False , _snake_case : str=True , **_snake_case : str , ):
"""simple docstring"""
super().__init__(tie_word_embeddings=_snake_case , is_encoder_decoder=_snake_case , **_snake_case)
if text_config is None:
UpperCAmelCase_ = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''')
if vision_config is None:
UpperCAmelCase_ = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''')
UpperCAmelCase_ = PixaStructTextConfig(**_snake_case)
UpperCAmelCase_ = PixaStructVisionConfig(**_snake_case)
UpperCAmelCase_ = self.text_config.decoder_start_token_id
UpperCAmelCase_ = self.text_config.pad_token_id
UpperCAmelCase_ = self.text_config.eos_token_id
UpperCAmelCase_ = initializer_factor
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = self.initializer_range
UpperCAmelCase_ = self.initializer_range
UpperCAmelCase_ = is_vqa
@classmethod
def lowerCamelCase ( cls : List[Any] , _snake_case : PixaStructTextConfig , _snake_case : PixaStructVisionConfig , **_snake_case : Optional[int]):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = copy.deepcopy(self.__dict__)
UpperCAmelCase_ = self.text_config.to_dict()
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
| 7
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
| 7
| 1
|
"""simple docstring"""
def lowerCAmelCase__ ( ):
'''simple docstring'''
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
_snake_case = generate_large_matrix()
_snake_case = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
assert all(row == sorted(__snake_case , reverse=__snake_case ) for row in grid )
assert all(list(__snake_case ) == sorted(__snake_case , reverse=__snake_case ) for col in zip(*__snake_case ) )
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : int = 0
_a : List[Any] = len(__snake_case ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_a : Any = (left + right) // 2
_a : Union[str, Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_a : List[Any] = mid + 1
else:
_a : Tuple = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__snake_case )
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : List[Any] = 0
_a : Union[str, Any] = len(grid[0] )
for i in range(len(__snake_case ) ):
_a : Optional[int] = find_negative_index(grid[i][:bound] )
total += bound
return (len(__snake_case ) * len(grid[0] )) - total
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : List[Any] = 0
for row in grid:
for i, number in enumerate(__snake_case ):
if number < 0:
total += len(__snake_case ) - i
break
return total
def lowerCAmelCase__ ( ):
'''simple docstring'''
from timeit import timeit
print("""Running benchmarks""" )
_a : int = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_a : Optional[int] = timeit(F"""{func}(grid=grid)""" , setup=__snake_case , number=5_0_0 )
print(F"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
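# Editorial sketch: the binary-search variant exploits that rows and columns are
# both sorted in decreasing order, so the first negative index can only move
# left (or stay) from one row to the next -- hence the shrinking bound.
if __name__ == "__main__":
    sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert count_negatives_binary_search(sample) == 8
    assert count_negatives_brute_force(sample) == 8
    assert count_negatives_brute_force_with_break(sample) == 8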
| 294
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        """
        :param claim_vector: the total amount of each resource in the system
        :param allocated_resources_table: resources currently held by each process
        :param maximum_claim_table: maximum resources each process may request
        """
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources for each resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Available resources: claim vector minus currently allocated resources."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process need: maximum claim minus currently allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map the index of each process to its need list."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm's safety check."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()

        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's data."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
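# Editorial usage sketch, reusing the module-level test data defined above; with
# describe=True the allocation and claim tables are printed before the safety
# simulation runs.
if __name__ == "__main__":
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)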
| 269
| 0
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused qkv projection into separate query/key/value tensors
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCAmelCase : Dict = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
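# Editorial usage sketch: the same conversion can be driven from Python instead
# of the CLI (network access to the timm weights and the COCO test image is
# required):
#
#   convert_swin_checkpoint("swin_tiny_patch4_window7_224", "./swin-tiny-converted")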
| 127
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculate the speed of sound in a fluid from its density and bulk modulus,
    c = sqrt(K / rho).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
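# Editorial worked example: for water near room temperature, density ~ 998 kg/m^3
# and bulk modulus ~ 2.15e9 Pa, giving c = sqrt(K / rho) ~ 1.47e3 m/s.
if __name__ == "__main__":
    print(f"{speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9):.0f} m/s")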
| 127
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ) -> dict:
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 143
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def __a ( self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def __a ( self ) -> int:
'''simple docstring'''
return 32
@property
def __a ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def __a ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __a ( self ) -> List[Any]:
'''simple docstring'''
return 100
@property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : Tuple = UNetaDConditionModel(**__UpperCamelCase )
return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
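
# Distilled end-user sketch of the same two-stage flow the test exercises (comments only:
# it needs a CUDA GPU and downloads the checkpoints named above; `hint` is a depth-map
# tensor of shape (1, 3, H, W) in [0, 1], prepared exactly as in the test):
# pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
# ).to("cuda")
# pipe = KandinskyV22ControlnetPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
# ).to("cuda")
# image_emb, zero_image_emb = pipe_prior("A robot, 4k photo").to_tuple()
# image = pipe(image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint).images[0]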
| 143
| 1
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
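
# Quick runnable check of trim_batch: a column that is pad (id 0) in every row is dropped.
if __name__ == "__main__":
    demo_ids = torch.tensor([[5, 6, 0, 0], [7, 8, 9, 0]])
    demo_mask = demo_ids.ne(0).long()
    demo_ids, demo_mask = trim_batch(demo_ids, 0, attention_mask=demo_mask)
    print(demo_ids.shape)  # torch.Size([2, 3]) -- only the all-pad last column was removed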
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
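
# How collate_fn is meant to be wired up (a sketch, not run here: "data_dir" and
# `tokenizer` are hypothetical placeholders; the directory must hold train.source/train.target):
# from torch.utils.data import DataLoader
# ds = Seq2SeqDataset(tokenizer, "data_dir", max_source_length=128, max_target_length=32)
# loader = DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)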
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 366
|
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        """Returns per-token negative log-likelihood when `labels` is given, else log-probabilities."""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        """Computes log probabilities for all n_token tokens."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
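
# Shape-level smoke test (assumed configuration; with div_val=1 and d_proj == d_embed the
# projections stay None, so only the properly initialized nn.Linear weights are exercised):
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
    hidden = torch.randn(2, 5, 32)                    # (batch, seq_len, d_proj)
    labels = torch.randint(0, 1000, (2, 5))
    nll = crit(hidden, labels)                        # shifted, flattened NLL: shape (8,)
    logprobs = crit.log_prob(torch.randn(4, 32))      # full log-distribution: shape (4, 1000)
    print(nll.shape, logprobs.shape)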
| 136
| 0
|