Rows from the dataset are shown below. Each row pairs a `code` sample with a `style_context` sample; the two `*_codestyle` fields are integer style IDs, and in the rows shown `label` is 1 exactly when the two style IDs match.

| Field | Type |
|---|---|
| `code` | string (lengths 82 to 53.2k) |
| `code_codestyle` | int64 (0 to 721) |
| `style_context` | string (lengths 91 to 41.9k) |
| `style_context_codestyle` | int64 (0 to 699) |
| `label` | int64 (0 or 1) |

**code:**

```python
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"\"\"

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
```

**code_codestyle:** 613

**style_context:**

```python
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
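    # Usage sketch (illustrative values, not part of the original sample):
    # round-trip a message through the rail fence cipher with key 4 and
    # recover it again by brute force over all key guesses.
    ciphertext = encrypt("WEAREDISCOVEREDFLEEATONCE", 4)
    assert decrypt(ciphertext, 4) == "WEAREDISCOVEREDFLEEATONCE"
    assert bruteforce(ciphertext)[4] == "WEAREDISCOVEREDFLEEATONCE"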
```

**style_context_codestyle:** 613, **label:** 1

---

**code:**

```python
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
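# Usage sketch (checkpoint paths are illustrative, not from the original file):
#
#   canny = ControlNetModel.from_pretrained("path/to/controlnet-canny")
#   pose = ControlNetModel.from_pretrained("path/to/controlnet-pose")
#   multi = MultiControlNetModel([canny, pose])
#   multi.save_pretrained("./multi_controlnet")
#   # writes ./multi_controlnet and ./multi_controlnet_1, which
#   # MultiControlNetModel.from_pretrained("./multi_controlnet") can reload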
```

**code_codestyle:** 703

**style_context:**

```python
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Reject trees containing nodes that are not TreeNode instances
    # or whose data cannot be interpreted as a float.
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
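    # Usage sketch (illustrative trees, not part of the original sample):
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    assert is_binary_search_tree(valid)
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))  # children out of order
    assert not is_binary_search_tree(invalid)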
```

**style_context_codestyle:** 339, **label:** 0

---

**code:**

```python
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it is not matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
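    # Worked check (a classic Jaro-Winkler example, not part of the original
    # sample): "martha" vs "marhta" have 6 matching characters and 1
    # transposition, so jaro = (6/6 + 6/6 + 5/6) / 3 = 0.944; with a 3-character
    # common prefix, jaro_winkler = 0.944 + 0.1 * 3 * (1 - 0.944) = 0.961.
    print(jaro_winkler("martha", "marhta"))  # prints roughly 0.9611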
```

**code_codestyle:** 697

**style_context:**

```python
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
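    # Usage sketch (an illustrative 4x4 maze, not part of the original sample;
    # 0 = open cell, 1 = wall):
    demo_maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    solve_maze(demo_maze)  # prints the marked path grid if a route exists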
```

**style_context_codestyle:** 697, **label:** 1

---

**code:**

```python
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __a ( A , A , **A ) -> Any:
'''simple docstring'''
A__ = AutoConfig.from_pretrained(__a , **__a )
A__ = AutoModelForSeqaSeqLM.from_config(__a )
model.save_pretrained(__a )
AutoTokenizer.from_pretrained(__a ).save_pretrained(__a )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
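# Example invocation (the script filename, model name, and path are
# illustrative, not from the original sample):
#   python save_randomly_initialized_model.py t5-small /tmp/rand-t5-small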
```

**code_codestyle:** 720

**style_context:**

```python
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
lowercase__ : str = StableDiffusionInstructPixaPixPipeline
lowercase__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
lowercase__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
A__ = CLIPTextModel(UpperCamelCase__ )
A__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ):
'''simple docstring'''
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("RGB" )
if str(UpperCamelCase__ ).startswith("mps" ):
A__ = torch.manual_seed(UpperCamelCase__ )
else:
A__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowercase_ ( self ):
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = self.get_dummy_inputs(UpperCamelCase__ )
A__ = sd_pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = self.get_dummy_inputs(UpperCamelCase__ )
A__ = "french fries"
A__ = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
A__ = output.images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = self.get_dummy_inputs(UpperCamelCase__ )
A__ = [inputs["prompt"]] * 2
A__ = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
A__ = torch.from_numpy(UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
A__ = image / 2 + 0.5
A__ = image.permute(0 , 3 , 1 , 2 )
A__ = image.repeat(2 , 1 , 1 , 1 )
A__ = sd_pipe(**UpperCamelCase__ ).images
A__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
A__ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" )
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = self.get_dummy_inputs(UpperCamelCase__ )
A__ = sd_pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1]
A__ = [round(UpperCamelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(UpperCamelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = VaeImageProcessor(do_resize=UpperCamelCase__ , do_normalize=UpperCamelCase__ )
A__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="pt" ) )[0]
A__ = components["vae"]
A__ = self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
A__ = vae.encode(inputs[image_param] ).latent_dist.mode()
A__ = pipe(**UpperCamelCase__ )[0]
A__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase__ , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self , UpperCamelCase__=0 ):
'''simple docstring'''
A__ = torch.manual_seed(UpperCamelCase__ )
A__ = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
A__ = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def lowercase_ ( self ):
'''simple docstring'''
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ )
A__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ )
A__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = 0
def callback_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
A__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A__ = latents[0, -3:, -3:, -1]
A__ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
A__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A__ = latents[0, -3:, -3:, -1]
A__ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
A__ = False
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
A__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase_ ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
A__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase__ )
A__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = inputs["image"].resize((5_04, 5_04) )
A__ = "timbrooks/instruct-pix2pix"
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = pipe(**UpperCamelCase__ )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
A__ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
```

**style_context_codestyle:** 261, **label:** 0

---

**code:**

```python
def is_pentagonal(n: int) -> bool:
    # n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is an integer.
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
```

**code_codestyle:** 85

**style_context:**

```python
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
@dataclasses.dataclass
class __magic_name__ :
_SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
_SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=lowercase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
_SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
_SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
_SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=lowercase__ , metadata={'help': 'A csv or a json file containing the validation data.'} )
_SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=lowercase__ , metadata={'help': 'The name of the task to train on.'} , )
_SCREAMING_SNAKE_CASE : Optional[List[str]] = dataclasses.field(
default=lowercase__ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
_SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
_SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
_SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
_SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
_SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
_SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=lowercase__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
_SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=lowercase__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
_SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=lowercase__ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
_SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
_SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=100 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
_SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=lowercase__ , metadata={'help': 'Random seed for initialization.'} , )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
__snake_case = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
__snake_case = dataset.filter(lambda SCREAMING_SNAKE_CASE : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
__snake_case = int(eval_result * len(SCREAMING_SNAKE_CASE ) )
print(SCREAMING_SNAKE_CASE )
__snake_case = dataset.sort("probability" , reverse=SCREAMING_SNAKE_CASE )
__snake_case = dataset.select(range(SCREAMING_SNAKE_CASE ) )
__snake_case = dataset.remove_columns(["label", "probability"] )
__snake_case = dataset.rename_column("prediction" , "label" )
__snake_case = dataset.map(lambda SCREAMING_SNAKE_CASE : {"label": idalabel[example["label"]]} )
__snake_case = dataset.shuffle(seed=args.seed )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
else:
dataset.to_json(SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
__snake_case = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__snake_case = STModelArguments(model_name_or_path=SCREAMING_SNAKE_CASE )
__snake_case = STDataArguments(train_file=SCREAMING_SNAKE_CASE , infer_file=SCREAMING_SNAKE_CASE )
__snake_case = STTrainingArguments(output_dir=SCREAMING_SNAKE_CASE )
__snake_case = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(SCREAMING_SNAKE_CASE ).items():
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for key, value in kwargs.items():
if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Sanity checks
__snake_case = {}
__snake_case = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__snake_case = args.train_file
__snake_case = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__snake_case = args.eval_file
for key in data_files:
__snake_case = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
__snake_case = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
__snake_case = F'''{args.output_dir}/self-train_iter-{{}}'''.format
__snake_case = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
__snake_case = None
__snake_case = None
__snake_case = 0
__snake_case = False
# Show the progress bar
__snake_case = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__snake_case = data_dir_format(SCREAMING_SNAKE_CASE )
assert os.path.exists(SCREAMING_SNAKE_CASE )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "stage-1" )
__snake_case = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
arguments_dict.update({key: value} )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "best-checkpoint" , SCREAMING_SNAKE_CASE )
if os.path.exists(SCREAMING_SNAKE_CASE ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , SCREAMING_SNAKE_CASE )
finetune(**SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE )
logger.info("Self-training job completed: iteration: %d, stage: 1." , SCREAMING_SNAKE_CASE )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "best-checkpoint" )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "stage-2" )
# Update arguments_dict
__snake_case = model_path
__snake_case = data_files["train"]
__snake_case = current_output_dir
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "best-checkpoint" , SCREAMING_SNAKE_CASE )
if os.path.exists(SCREAMING_SNAKE_CASE ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , SCREAMING_SNAKE_CASE )
finetune(**SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE )
logger.info("Self-training job completed: iteration: %d, stage: 2." , SCREAMING_SNAKE_CASE )
__snake_case = iteration
__snake_case = data_dir_format(iteration + 1 )
__snake_case = AutoConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , "best-checkpoint" ) )
__snake_case = config.idalabel
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "eval_results_best-checkpoint.json" )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "test_results_best-checkpoint.json" )
assert os.path.exists(SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , "r" ) as f:
__snake_case = float(json.load(SCREAMING_SNAKE_CASE )[args.eval_metric] )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "infer_output_best-checkpoint.csv" )
assert os.path.exists(SCREAMING_SNAKE_CASE )
# Loading the dataset from local csv or json files.
__snake_case = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
__snake_case = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
shutil.copy(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(SCREAMING_SNAKE_CASE ):
shutil.copy(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__snake_case = eval_result
if best_iteration is None:
__snake_case = new_iteration
__snake_case = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__snake_case = new_iteration
__snake_case = new_eval_result
__snake_case = 0
else:
if new_eval_result == best_eval_result:
__snake_case = new_iteration
__snake_case = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__snake_case = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , SCREAMING_SNAKE_CASE )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , SCREAMING_SNAKE_CASE )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE , "eval_results_best-iteration.json" ) , )
```

**style_context_codestyle:** 163, **label:** 0

---

**code:**

```python
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # Total of each resource currently allocated across all processes.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        # Remaining need per process: maximum claim minus current allocation.
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
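    # Usage sketch using the module-level test data defined above (any truthy
    # keyword such as describe=True triggers the pretty-printed tables before
    # the safety simulation runs):
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)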
```

**code_codestyle:** 709

**style_context:**

```python
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
```

**style_context_codestyle:** 240, **label:** 0

---

**code:**

```python
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
'''simple docstring'''
debug_launcher(test_script.main )
def _a ( self ):
'''simple docstring'''
debug_launcher(test_ops.main )
```

**code_codestyle:** 102

**style_context:**

```python
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
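# Usage sketch (not in the original file): constructing the config without a
# `backbone_config` falls back to a default ResNet backbone.
#
#   config = UperNetConfig()
#   assert config.backbone_config.model_type == "resnet"
#   assert "backbone_config" in config.to_dict()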
```

**style_context_codestyle:** 286, **label:** 0

---

**code:**

```python
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
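# Usage sketch (illustrative values, not in the original file):
#
#   config = FalconConfig(hidden_size=256, num_attention_heads=8, alibi=False)
#   config.head_dim  # 256 // 8 == 32
#   config.rotary    # True: rotary embeddings are used whenever alibi is off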
```

**code_codestyle:** 703

**style_context:**

```python
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _A ( __snake_case :int ) -> Optional[int]:
"""simple docstring"""
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
def _A ( __snake_case :str ) -> int:
"""simple docstring"""
for char in word:
__SCREAMING_SNAKE_CASE = ord(__snake_case )
if not _is_chinese_char(__snake_case ):
return 0
return 1
def _A ( __snake_case :List[str] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = set()
for token in tokens:
__SCREAMING_SNAKE_CASE = len(__snake_case ) > 1 and is_chinese(__snake_case )
if chinese_word:
word_set.add(__snake_case )
__SCREAMING_SNAKE_CASE = list(__snake_case )
return word_list
def _A ( __snake_case :List[str] , __snake_case :set() ) -> Any:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
__SCREAMING_SNAKE_CASE = max([len(__snake_case ) for w in chinese_word_set] )
__SCREAMING_SNAKE_CASE = bert_tokens
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, len(__snake_case )
while start < end:
__SCREAMING_SNAKE_CASE = True
if is_chinese(bert_word[start] ):
__SCREAMING_SNAKE_CASE = min(end - start , __snake_case )
for i in range(__snake_case , 1 , -1 ):
__SCREAMING_SNAKE_CASE = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__SCREAMING_SNAKE_CASE = "##" + bert_word[j]
__SCREAMING_SNAKE_CASE = start + i
__SCREAMING_SNAKE_CASE = False
break
if single_word:
start += 1
return bert_word
def _A ( __snake_case :List[str] , __snake_case :LTP , __snake_case :BertTokenizer ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for i in range(0 , len(__snake_case ) , 100 ):
__SCREAMING_SNAKE_CASE = ltp_tokenizer.seg(lines[i : i + 100] )[0]
__SCREAMING_SNAKE_CASE = [get_chinese_word(__snake_case ) for r in res]
ltp_res.extend(__snake_case )
assert len(__snake_case ) == len(__snake_case )
__SCREAMING_SNAKE_CASE = []
for i in range(0 , len(__snake_case ) , 100 ):
__SCREAMING_SNAKE_CASE = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__snake_case , truncation=__snake_case , max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__snake_case ) == len(__snake_case )
__SCREAMING_SNAKE_CASE = []
for input_ids, chinese_word in zip(__snake_case , __snake_case ):
__SCREAMING_SNAKE_CASE = []
for id in input_ids:
__SCREAMING_SNAKE_CASE = bert_tokenizer._convert_id_to_token(__snake_case )
input_tokens.append(__snake_case )
__SCREAMING_SNAKE_CASE = add_sub_symbol(__snake_case , __snake_case )
__SCREAMING_SNAKE_CASE = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__snake_case ):
if token[:2] == "##":
__SCREAMING_SNAKE_CASE = token[2:]
# save chinese tokens' pos
if len(__snake_case ) == 1 and _is_chinese_char(ord(__snake_case ) ):
ref_id.append(__snake_case )
ref_ids.append(__snake_case )
assert len(__snake_case ) == len(__snake_case )
return ref_ids
def _A ( __snake_case :Tuple ) -> Any:
"""simple docstring"""
with open(args.file_name , "r" , encoding="utf-8" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
__SCREAMING_SNAKE_CASE = [line.strip() for line in data if len(__snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__SCREAMING_SNAKE_CASE = LTP(args.ltp ) # faster in GPU device
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(args.bert )
__SCREAMING_SNAKE_CASE = prepare_ref(__snake_case , __snake_case , __snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
__SCREAMING_SNAKE_CASE = [json.dumps(__snake_case ) + "\n" for ref in ref_ids]
f.writelines(__snake_case )
if __name__ == "__main__":
_snake_case : List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
_snake_case : Union[str, Any] = parser.parse_args()
main(args)
```

**style_context_codestyle:** 214, **label:** 0

---

**code:**

```python
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowercase_ ( a_ ):
def __init__( self : int , *_lowercase : Dict , **_lowercase : Any ):
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
```

**code_codestyle:** 308

**style_context:**

```python
"""simple docstring"""
def print_pascal_triangle(num_rows):
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows):
    """Create Pascal's triangle as a list of rows, one list per row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle, current_row_idx):
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle, current_row, current_row_idx, current_col_idx):
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows):
    """Create Pascal's triangle using only the previous row to build the next one."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark():
    """Benchmark both triangle generators with increasing row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
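# Illustrative check (not part of the original module): the naive and the optimized
# generators agree, e.g.
#
#   >>> generate_pascal_triangle(4)
#   [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
#   >>> generate_pascal_triangle_optimized(4) == generate_pascal_triangle(4)
#   True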
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
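# Usage sketch (assumption: the "dandelin/vilt-b32-finetuned-vqa" checkpoint is available
# on the Hub; the image file name is a placeholder). One processor call returns both the
# text features (input_ids/attention_mask) and the image features (pixel_values/pixel_mask):
#
#   from transformers import ViltProcessor
#   from PIL import Image
#
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   image = Image.open("cats.jpg")
#   encoding = processor(image, "How many cats are there?", return_tensors="pt")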
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
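# Illustrative use (not in the original file): build a three-node tree and read the sum
# of all node values via the iterator.
#
#   root = Node(10)
#   root.left, root.right = Node(5), Node(-3)
#   assert next(iter(BinaryTreeNodeSum(root))) == 12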
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
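# Illustrative use (not part of the original module): the ONNX config exposes the dynamic
# axes the exporter needs for each input tensor.
#
#   config = CamembertConfig()
#   onnx_config = CamembertOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask axes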
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524_288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
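# Usage sketch (assumes the Hub checkpoint named in PRETRAINED_VOCAB_FILES_MAP above):
#
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Crime and Punishment").input_ids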
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
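# Note (illustrative, not part of the file): because the module object is replaced with a
# _LazyModule, a submodule such as modeling_mobilevit is only imported the first time one
# of its attributes is accessed, e.g.
#
#   from transformers.models.mobilevit import MobileViTConfig  # triggers the real import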
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250_004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=10_24, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_00_04, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
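# Example invocation (illustrative; the script file name and all paths are placeholders,
# not values from the original script):
#
#   python convert_wav2vec2_mbart50_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50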
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
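# Usage sketch: the concrete feature extractor class is resolved from the checkpoint's
# configuration, e.g. (assuming the Hub checkpoint exists and its config names
# Wav2Vec2FeatureExtractor):
#
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")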
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
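# Example invocation (illustrative; the script file name and file locations are
# placeholders, while the flags come from the argparse definition above):
#
#   python convert_luke_checkpoint.py --checkpoint_path pytorch_model.bin \
#       --metadata_path metadata.json --entity_vocab_path entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base --model_size base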
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = GPTaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs,):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
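# Minimal usage sketch for the fast tokenizer above (assumes the standard
# "gpt2" checkpoint is reachable on the Hugging Face Hub; exact token ids
# depend on the vocabulary, so none are shown):
#   tokenizer = GPTaTokenizerFast.from_pretrained("gpt2")
#   ids = tokenizer("Hello world").input_ids
#   text = tokenizer.decode(ids)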
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    """simple docstring"""

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)
        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {"array": np.ones((8_000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )
    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type) -> None:
    """Walk `key` through the HF model and copy `value` into the matching parameter."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy fairseq wav2vec2 encoder weights into `hf_model`; returns the encoder->decoder projection if present."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> None:
    """Copy a single fairseq feature-extractor conv/layer-norm weight into the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb) -> nn.Linear:
    """Create a bias-free linear layer whose weight is tied to the given embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
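# Illustrative sketch of the weight tying performed above (sizes hypothetical):
#   emb = nn.Embedding(1000, 64)
#   head = make_linear_from_emb(emb)   # an nn.Linear(64, 1000, bias=False)
#   assert head.weight.data_ptr() == emb.weight.data_ptr()  # shared storage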
def create_vocab_dict(dict_path):
    """Build a token->id mapping from a fairseq dict file, reserving ids 0-3 for special tokens."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
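# Illustrative result (hypothetical dict file): if dict.txt starts with the
# lines "hello 52" and "world 41", create_vocab_dict returns
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}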
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers,):
    """Convert a fairseq wav2vec2 + speech_to_text_2 checkpoint into a HF SpeechEncoderDecoderModel."""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
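# Example invocation (script name and all paths are illustrative placeholders):
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path ./s2t/checkpoint_best.pt \
#       --pytorch_dump_folder_path ./converted_model \
#       --dict_path ./s2t/dict.txt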
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict) -> None:
    """Drop fairseq bookkeeping keys that have no HF counterpart."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb) -> nn.Linear:
    """Create a bias-free linear layer whose weight is tied to the given embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path) -> XGLMForCausalLM:
    """Load a fairseq XGLM checkpoint and rebuild it as a HF XGLMForCausalLM."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
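# Example invocation (script name and paths are illustrative placeholders;
# the two arguments are positional):
#   python convert_xglm_checkpoint.py ./xglm/model.pt ./converted_xglm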
'''simple docstring'''
def _A ( number_of_steps ):
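    """Count the distinct ways to climb a staircase of `number_of_steps` steps
    taking one or two steps at a time (a Fibonacci-style recurrence).

    Doctests below were added for illustration:

    >>> _A(3)
    3
    >>> _A(4)
    5
    """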
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from math import factorial
def combinations(n, k):
    """Return n choose k, the number of ways to pick k items from n."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F'fifty-two card deck is: {combinations(5_2, 5)}\n',
)
print(
"If a class of 40 students must be arranged into groups of",
F'4 for group projects, there are {combinations(4_0, 4)} ways',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F'are {combinations(1_0, 3)} ways that first, second and',
"third place can be awarded.",
)
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs,):
        '''simple docstring'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )
    def read(self):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
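# Illustrative use of the reader above (file path is a hypothetical placeholder):
#   reader = TextDatasetReader("corpus.txt", split=NamedSplit("train"))
#   dataset = reader.read()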
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        '''simple docstring'''
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_batch_feature(self):
        '''simple docstring'''
        pass
    def test_call_pil(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
def decimal_isolate(number, digit_amount):
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
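# Illustrative use of the helper above (tensors are hypothetical):
#   inputs = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
#   outputs = model(**inputs)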
class MaMaaaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : str = self.eos_token_id # Eos Token
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE : int = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
SCREAMING_SNAKE_CASE : Dict = prepare_mam_aaa_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def __lowercase ( self : Any ):
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = MaMaaaModel(config=lowerCAmelCase__ ).get_decoder().to(lowerCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE : str = inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Tuple = inputs_dict['''attention_mask''']
SCREAMING_SNAKE_CASE : int = inputs_dict['''head_mask''']
# first forward pass
SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE : str = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )['''last_hidden_state''']
SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[
'''last_hidden_state'''
]
# select random slice
SCREAMING_SNAKE_CASE : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-2 ) )
def __lowercase ( self : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = MaMaaaModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE : int = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = model.get_encoder()
encoder.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = MaMaaaEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = model.get_decoder()
decoder.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = MaMaaaDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = decoder(
input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False
    def setUp(self):
        """simple docstring"""
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertEqual(info['''missing_keys'''] , [] )
def __lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
SCREAMING_SNAKE_CASE : Tuple = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Tuple = inputs['''input_ids''']
del inputs["input_ids"]
else:
SCREAMING_SNAKE_CASE : str = inputs['''input_ids''']
SCREAMING_SNAKE_CASE : Tuple = inputs.get('''decoder_input_ids''' , lowerCAmelCase__ )
del inputs["input_ids"]
inputs.pop('''decoder_input_ids''' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = model.get_input_embeddings()
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Optional[Any] = wte(lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE : int = wte(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = wte(lowerCAmelCase__ )
with torch.no_grad():
model(**lowerCAmelCase__ )[0]
def __lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : Any = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : List[str] = input_ids.ne(1 ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = MaMaaaForConditionalGeneration(lowerCAmelCase__ ).eval().to(lowerCAmelCase__ )
if torch_device == "cuda":
model.half()
model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
model.generate(num_beams=4 , do_sample=lowerCAmelCase__ , early_stopping=lowerCAmelCase__ , num_return_sequences=3 )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        """simple docstring"""
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")
    def test_inference_no_head(self):
        """simple docstring"""
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        """simple docstring"""
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        """simple docstring"""
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"), )
        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True)
        assert generated == expected_en
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __lowercase ( self : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TFRegNetModel(config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowercase ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Any = TFRegNetForImageClassification(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def __lowercase ( self : List[str] ):
"""simple docstring"""
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def __lowercase ( self : int ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __lowercase ( self : Dict ):
"""simple docstring"""
pass
def __lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __lowercase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __lowercase ( self : Dict ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) , training=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : int = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_type
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str={} ):
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ , return_dict=lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , return_dict=lowerCAmelCase__ , **lowerCAmelCase__ ).to_tuple()
def recursive_check(lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ):
if isinstance(lowerCAmelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
recursive_check(lowerCAmelCase__ , lowerCAmelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCAmelCase__ , lowerCAmelCase__ ) ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(lowerCAmelCase__ , lowerCAmelCase__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , {'''output_hidden_states''': True} )
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , {'''output_hidden_states''': True} )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __lowercase ( self : Any ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = TFRegNetModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def UpperCAmelCase ( ):
SCREAMING_SNAKE_CASE : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : List[str] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : int = prepare_img()
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCAmelCase__ , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**lowerCAmelCase__ , training=lowerCAmelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 )
| 464
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class lowercase_ ( __lowerCamelCase ):
"""simple docstring"""
__lowerCAmelCase = 42
__lowerCAmelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class lowercase_ ( __lowerCamelCase ):
"""simple docstring"""
__lowerCAmelCase = 42
__lowerCAmelCase = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 107
|
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
__a : Any = ''
for i in table:
res += inp[i - 1]
return res
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] ):
return data[1:] + data[0]
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] ):
__a : Optional[int] = ''
for i in range(len(lowerCamelCase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ):
__a : List[str] = int('0b' + data[0] + data[-1] , 2 )
__a : List[str] = int('0b' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
__a : List[Any] = message[:4]
__a : str = message[4:]
__a : Any = apply_table(lowerCamelCase_ , lowerCamelCase_ )
__a : int = xor(lowerCamelCase_ , lowerCamelCase_ )
__a : Dict = apply_sbox(lowerCamelCase_ , temp[:4] ) # noqa: E741
__a : Tuple = apply_sbox(lowerCamelCase_ , temp[4:] )
__a : List[Any] = '0' * (2 - len(lowerCamelCase_ )) + l # noqa: E741
__a : List[str] = '0' * (2 - len(lowerCamelCase_ )) + r
__a : List[Any] = apply_table(l + r , lowerCamelCase_ )
__a : Dict = xor(lowerCamelCase_ , lowerCamelCase_ )
return temp + right
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input('''Enter 10 bit key: ''')
SCREAMING_SNAKE_CASE__ = input('''Enter 8 bit message: ''')
SCREAMING_SNAKE_CASE__ = [6, 3, 7, 4, 8, 5, 10, 9]
SCREAMING_SNAKE_CASE__ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
SCREAMING_SNAKE_CASE__ = [2, 4, 3, 1]
SCREAMING_SNAKE_CASE__ = [2, 6, 3, 1, 4, 8, 5, 7]
SCREAMING_SNAKE_CASE__ = [4, 1, 3, 5, 7, 2, 8, 6]
SCREAMING_SNAKE_CASE__ = [4, 1, 2, 3, 2, 3, 4, 1]
SCREAMING_SNAKE_CASE__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
SCREAMING_SNAKE_CASE__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
SCREAMING_SNAKE_CASE__ = apply_table(key, paa_table)
SCREAMING_SNAKE_CASE__ = temp[:5]
SCREAMING_SNAKE_CASE__ = temp[5:]
SCREAMING_SNAKE_CASE__ = left_shift(left)
SCREAMING_SNAKE_CASE__ = left_shift(right)
SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table)
SCREAMING_SNAKE_CASE__ = left_shift(left)
SCREAMING_SNAKE_CASE__ = left_shift(right)
SCREAMING_SNAKE_CASE__ = left_shift(left)
SCREAMING_SNAKE_CASE__ = left_shift(right)
SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table)
# encryption
SCREAMING_SNAKE_CASE__ = apply_table(message, IP)
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
SCREAMING_SNAKE_CASE__ = apply_table(CT, IP)
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4]
SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp)
SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
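# Hand-checked illustration of the bit-string helpers above. Assumption: the
# readable names apply_table / left_shift / xor stand in for the obfuscated
# function names in this sample; this is a sketch, not the original code.
def apply_table(inp: str, table: list) -> str:
    return "".join(inp[i - 1] for i in table)  # permutation tables are 1-indexed

def left_shift(data: str) -> str:
    return data[1:] + data[0]  # rotate the bit string left by one position

def xor(a: str, b: str) -> str:
    return "".join("0" if x == y else "1" for x, y in zip(a, b))

assert apply_table("1010", [2, 4, 3, 1]) == "0011"  # bits 2, 4, 3, 1 of "1010"
assert left_shift("1010") == "0101"
assert xor("1010", "0110") == "1100"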
| 47
| 0
|
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] =0
# if input_string is "aba" then new_input_string becomes "a|b|a"
SCREAMING_SNAKE_CASE__ : Optional[Any] =''''''
SCREAMING_SNAKE_CASE__ : Dict =''''''
# append each character + "|" to new_input_string for range(0, length - 1)
for i in input_string[: len(UpperCamelCase__ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[int] =0, 0
# length[i] shows the length of palindromic substring with center i
SCREAMING_SNAKE_CASE__ : Tuple =[1 for i in range(len(UpperCamelCase__ ) )]
# for each character in new_string find corresponding palindromic string
SCREAMING_SNAKE_CASE__ : Tuple =0
for j in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE__ : str =1 if j > r else min(length[l + r - j] // 2, r - j + 1 )
while (
j - k >= 0
and j + k < len(UpperCamelCase__ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
SCREAMING_SNAKE_CASE__ : Dict =2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
SCREAMING_SNAKE_CASE__ : Optional[int] =j - k + 1 # noqa: E741
SCREAMING_SNAKE_CASE__ : Union[str, Any] =j + k - 1
# update max_length and start position
if max_length < length[j]:
SCREAMING_SNAKE_CASE__ : Any =length[j]
SCREAMING_SNAKE_CASE__ : List[str] =j
# create that string
SCREAMING_SNAKE_CASE__ : List[Any] =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
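# De-obfuscated sketch of the same Manacher construction (assumption: readable
# names replace the obfuscated ones above; not the original function).
def longest_palindrome(s: str) -> str:
    t = "|".join(s)  # "aba" -> "a|b|a", so even/odd palindromes share one form
    length = [1] * len(t)  # palindrome span centered at each index
    l = r = 0  # noqa: E741  bounds of the furthest-reaching palindrome
    best_len, best_center = 1, 0
    for j in range(len(t)):
        # reuse the mirror center when inside [l, r], then expand as needed
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while j - k >= 0 and j + k < len(t) and t[j + k] == t[j - k]:
            k += 1
        length[j] = 2 * k - 1
        if j + k - 1 > r:
            l, r = j - k + 1, j + k - 1  # noqa: E741
        if length[j] > best_len:
            best_len, best_center = length[j], j
    window = t[best_center - best_len // 2 : best_center + best_len // 2 + 1]
    return window.replace("|", "")

assert longest_palindrome("abababa") == "abababa"
assert longest_palindrome("cbbd") == "bb"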
| 700
|
'''simple docstring'''
from math import isqrt
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =[True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Any =False
return [i for i in range(2, UpperCamelCase__ ) if is_prime[i]]
def _a( UpperCamelCase__ : int = 1_0**8 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
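# Worked example of the two-pointer count above (assumption: the readable name
# calculate_prime_numbers stands in for the obfuscated sieve function).
# For max_number = 30: primes below 15 are [2, 3, 5, 7, 11, 13]; the scan counts
# p=2 -> q in {2,3,5,7,11,13} (6 pairs), p=3 -> q in {3,5,7} (3), p=5 -> {5} (1),
# giving the 10 semiprimes 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
def brute_force(max_number: int) -> int:
    primes = calculate_prime_numbers(max_number)
    prime_set = set(primes)
    count = 0
    for n in range(4, max_number):
        for p in primes:
            if p * p > n:
                break  # no prime factor <= sqrt(n): n is prime, not semiprime
            if n % p == 0:
                count += (n // p) in prime_set  # semiprime iff cofactor is prime
                break
    return count

assert brute_force(30) == 10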
| 665
| 0
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
snake_case_ = datasets.logging.get_logger(__name__)
snake_case_ = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
snake_case_ = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
snake_case_ = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
snake_case_ = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def a (self : Tuple ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def a (self : Union[str, Any] , a__ : Optional[int] ):
"""simple docstring"""
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
__snake_case = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
__snake_case = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__snake_case = self.config_name.upper()
else:
raise KeyError(
f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
__snake_case = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
__snake_case = score.BleurtScorer(os.path.join(a__ , a__ ) )
def a (self : Any , a__ : Any , a__ : Any ):
"""simple docstring"""
__snake_case = self.scorer.score(references=a__ , candidates=a__ )
return {"scores": scores}
| 592
|
def lowerCamelCase__ ( snake_case_ : int = 1000 ) -> int:
__snake_case = 2**power
__snake_case = str(snake_case_ )
__snake_case = list(snake_case_ )
__snake_case = 0
for i in list_num:
sum_of_num += int(snake_case_ )
return sum_of_num
if __name__ == "__main__":
snake_case_ = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
snake_case_ = solution(power)
print('Sum of the digits is: ', result)
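# Hand-checked illustration of the digit-sum computation above (self-contained
# sketch; it does not call the obfuscated function): 2 ** 15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26, the classic Project Euler 16 warm-up value.
assert sum(int(d) for d in str(2 ** 15)) == 26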
| 592
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
UpperCamelCase__ = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase__ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase__ = "roberta"
elif args.model_type == "gpt2":
UpperCamelCase__ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase__ = "transformer"
UpperCamelCase__ = model.state_dict()
UpperCamelCase__ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase__ = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase__ = f"""{prefix}.embeddings.{w}.weight"""
UpperCamelCase__ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase__ = f"""{prefix}.embeddings.LayerNorm.{w}"""
UpperCamelCase__ = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
UpperCamelCase__ = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase__ = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[f"""lm_head.dense.{w}"""]
UpperCamelCase__ = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase__ = state_dict[f"""{prefix}.ln_f.{w}"""]
UpperCamelCase__ = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
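# Minimal sketch of the layer-selection idea above, with hypothetical tensors
# rather than a real checkpoint: teacher layers [0, 2, 4, 7, 9, 11] are copied
# into student slots 0..5, distilling a 12-layer teacher into a 6-layer student.
import torch

teacher_sd = {f"encoder.layer.{i}.weight": torch.full((2,), float(i)) for i in range(12)}
student_sd = {}
for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    student_sd[f"encoder.layer.{std_idx}.weight"] = teacher_sd[f"encoder.layer.{teacher_idx}.weight"]
assert student_sd["encoder.layer.5.weight"][0].item() == 11.0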
| 710
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = params
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array([len(A__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A__ ) -> Dict:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Tuple:
return len(self.lengths )
def UpperCamelCase ( self ) -> Dict:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.params.max_model_input_size
_SCREAMING_SNAKE_CASE = self.lengths > max_len
logger.info(F"Splitting {sum(A__ )} too long sequences." )
def divide_chunks(A__ , A__ ):
return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )]
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
if self.params.mlm:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_SCREAMING_SNAKE_CASE = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , 0 , A__ )
if sub_s[-1] != sep_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , len(A__ ) , A__ )
assert len(A__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(A__ )
new_tok_ids.extend(A__ )
new_lengths.extend([len(A__ ) for l in sub_seqs] )
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array(A__ )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = self.lengths > 11
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def UpperCamelCase ( self ) -> int:
if "unk_token" not in self.params.special_tok_ids:
return
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def UpperCamelCase ( self ) -> Optional[Any]:
if not self.params.is_master:
return
logger.info(F"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCamelCase ( self , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = [t[0] for t in batch]
_SCREAMING_SNAKE_CASE = [t[1] for t in batch]
assert len(A__ ) == len(A__ )
# Max for paddings
_SCREAMING_SNAKE_CASE = max(A__ )
# Pad token ids
if self.params.mlm:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""]
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids]
assert len(tk_ ) == len(A__ )
assert all(len(A__ ) == max_seq_len_ for t in tk_ )
_SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_)
_SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs)
return tk_t, lg_t
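# Standalone sketch of the padding done by the collate method above (assumed toy
# token ids; pad_idx plays the role of the pad/unk token id in the real dataset).
import torch

def pad_batch(token_ids, pad_idx):
    max_len = max(len(t) for t in token_ids)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor([len(t) for t in token_ids])

tk, lg = pad_batch([[5, 9, 2], [5, 7, 7, 9, 2]], pad_idx=0)
assert tk.shape == (2, 5) and lg.tolist() == [3, 5]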
| 0
| 0
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__UpperCamelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
class _UpperCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
a_ : bool = None
a_ : bool = None
class _UpperCamelCase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
a_ : Optional[int] = datasets.Audio()
a_ : Union[str, Any] = """audio"""
a_ : int = AudioFolderConfig
a_ : List[str] # definition at the bottom of the script
a_ : Dict = AudioClassification(audio_column="audio",label_column="label" )
__UpperCamelCase : Any = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
__UpperCamelCase : Union[str, Any] = AUDIO_EXTENSIONS
| 519
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class A ( unittest.TestCase ):
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=18 , lowerCamelCase__=30 , lowerCamelCase__=400 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , ) -> List[str]:
'''simple docstring'''
lowercase__ = size if size is not None else {"""height""": 18, """width""": 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_normalize
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class A ( __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = ImageGPTImageProcessingTester(self )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """clusters""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_normalize""" ) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
lowercase__ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCamelCase__ )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(lowerCamelCase__ , """image_processor.json""" )
image_processor_first.to_json_file(lowerCamelCase__ )
lowercase__ = self.image_processing_class.from_json_file(lowerCamelCase__ ).to_dict()
lowercase__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCamelCase__ )
lowercase__ = self.image_processing_class.from_pretrained(lowerCamelCase__ ).to_dict()
lowercase__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _A ( ):
lowercase__ = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
lowercase__ = Image.open(dataset[4]["""file"""] )
lowercase__ = Image.open(dataset[5]["""file"""] )
lowercase__ = [imagea, imagea]
return images
@require_vision
@require_torch
class A ( unittest.TestCase ):
@slow
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
lowercase__ = prepare_images()
# test non-batched
lowercase__ = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
lowercase__ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase__ )
# test batched
lowercase__ = image_processing(lowerCamelCase__ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
lowercase__ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase__ )
| 325
| 0
|
def lowercase_ ( __snake_case : int ) -> list:
'''simple docstring'''
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
snake_case__ :Dict = gray_code_sequence_string(__snake_case )
#
# convert them to integers
for i in range(len(__snake_case ) ):
snake_case__ :Optional[int] = int(sequence[i] , 2 )
return sequence
def lowercase_ ( __snake_case : int ) -> list:
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
snake_case__ :Optional[int] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
snake_case__ :Tuple = gray_code_sequence_string(bit_count - 1 )
snake_case__ :int = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
snake_case__ :List[Any] = "0" + smaller_sequence[i]
sequence.append(__snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
snake_case__ :str = "1" + smaller_sequence[i]
sequence.append(__snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
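# Self-contained sketch of the reflect-and-prefix construction above for
# bit_count >= 1 (assumption: readable names; the sample's function names are
# obfuscated, so this does not call them directly).
def gray_code(bit_count: int) -> list:
    seq = ["0", "1"]
    for _ in range(bit_count - 1):
        # prefix 0 to the sequence, then 1 to its reflection
        seq = ["0" + s for s in seq] + ["1" + s for s in reversed(seq)]
    return [int(s, 2) for s in seq]

codes = gray_code(2)
assert codes == [0, 1, 3, 2]
# the defining property: consecutive codes differ in exactly one bit
assert all(bin(a ^ b).count("1") == 1 for a, b in zip(codes, codes[1:]))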
| 57
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :List[str] = controlnet_params
snake_case__ :Union[str, Any] = "bird"
snake_case__ :Optional[int] = jax.device_count()
snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :int = replicate(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :str = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :Any = images[0, 253:256, 253:256, -1]
snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[Any] = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :str = controlnet_params
snake_case__ :int = "Chef in the kitchen"
snake_case__ :List[Any] = jax.device_count()
snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :Dict = replicate(UpperCamelCase )
snake_case__ :Tuple = shard(UpperCamelCase )
snake_case__ :Optional[int] = shard(UpperCamelCase )
snake_case__ :Optional[Any] = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[str] = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
| 1
|
"""simple docstring"""
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __snake_case ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def __snake_case ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
'''simple docstring'''
return (-y * np.log(SCREAMING_SNAKE_CASE__ ) - (1 - y) * np.log(1 - h )).mean()
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : int = np.dot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return np.sum(y * scores - np.log(1 + np.exp(SCREAMING_SNAKE_CASE__ ) ) )
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=70_000 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = np.zeros(x.shape[1] )
for iterations in range(SCREAMING_SNAKE_CASE__ ):
_UpperCAmelCase : Optional[Any] = np.dot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = sigmoid_function(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Union[str, Any] = np.dot(x.T , h - y ) / y.size
_UpperCAmelCase : int = theta - alpha * gradient # updating the weights
_UpperCAmelCase : Any = np.dot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : List[Any] = sigmoid_function(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : str = cost_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if iterations % 100 == 0:
print(f'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = datasets.load_iris()
_lowerCAmelCase : Dict = iris.data[:, :2]
_lowerCAmelCase : Optional[int] = (iris.target != 0) * 1
_lowerCAmelCase : Optional[Any] = 0.1
_lowerCAmelCase : int = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print("theta: ", theta) # printing the theta i.e our weights vector
def __snake_case ( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((_lowerCAmelCase), (_lowerCAmelCase)) : Optional[Any] = (x[:, 0].min(), x[:, 0].max())
((_lowerCAmelCase), (_lowerCAmelCase)) : int = (x[:, 1].min(), x[:, 1].max())
((_lowerCAmelCase), (_lowerCAmelCase)) : Union[str, Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_lowerCAmelCase : Any = np.c_[xxa.ravel(), xxa.ravel()]
_lowerCAmelCase : Union[str, Any] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
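# One hand-checked gradient step for the loop above (assumed toy values):
# x = [[1], [1]], y = [0, 1], theta = [0] gives h = sigmoid(0) = 0.5 for both
# rows, so gradient = x.T @ (h - y) / 2 = (0.5 - 0.5) / 2 = 0 and theta is
# unchanged: the two examples pull in opposite directions with equal force.
import numpy as np

x = np.array([[1.0], [1.0]])
y = np.array([0.0, 1.0])
theta = np.zeros(1)
h = 1 / (1 + np.exp(-x.dot(theta)))
gradient = x.T.dot(h - y) / y.size
assert np.allclose(gradient, 0.0)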
| 289
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = SwinConfig(image_size=192 )
if "base" in model_name:
_UpperCAmelCase : Tuple = 6
_UpperCAmelCase : Optional[Any] = 128
_UpperCAmelCase : Dict = (2, 2, 18, 2)
_UpperCAmelCase : List[Any] = (4, 8, 16, 32)
elif "large" in model_name:
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[int] = 192
_UpperCAmelCase : Optional[Any] = (2, 2, 18, 2)
_UpperCAmelCase : str = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
_UpperCAmelCase : Optional[int] = window_size
_UpperCAmelCase : Optional[int] = embed_dim
_UpperCAmelCase : List[Any] = depths
_UpperCAmelCase : Any = num_heads
return config
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict ) -> str:
'''simple docstring'''
if "encoder.mask_token" in name:
_UpperCAmelCase : Dict = name.replace("encoder.mask_token" , "embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
_UpperCAmelCase : Optional[int] = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
_UpperCAmelCase : Any = name.replace("encoder.patch_embed.norm" , "embeddings.norm" )
if "attn.proj" in name:
_UpperCAmelCase : int = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_UpperCAmelCase : Dict = name.replace("attn" , "attention.self" )
if "norm1" in name:
_UpperCAmelCase : List[str] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_UpperCAmelCase : int = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_UpperCAmelCase : Any = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
_UpperCAmelCase : Any = "layernorm.weight"
if name == "encoder.norm.bias":
_UpperCAmelCase : List[Any] = "layernorm.bias"
if "decoder" in name:
pass
else:
_UpperCAmelCase : Dict = "swin." + name
return name
def __snake_case ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCAmelCase : Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
_UpperCAmelCase : int = key.split("." )
_UpperCAmelCase : List[str] = int(key_split[2] )
_UpperCAmelCase : Dict = int(key_split[4] )
_UpperCAmelCase : str = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase : Optional[Any] = val[:dim, :]
_UpperCAmelCase : List[Any] = val[
dim : dim * 2, :
]
_UpperCAmelCase : Optional[Any] = val[-dim:, :]
else:
_UpperCAmelCase : Optional[int] = val[
:dim
]
_UpperCAmelCase : Dict = val[
dim : dim * 2
]
_UpperCAmelCase : Union[str, Any] = val[
-dim:
]
else:
_UpperCAmelCase : List[Any] = val
return orig_state_dict
def __snake_case ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = torch.load(SCREAMING_SNAKE_CASE__ , map_location="cpu" )["model"]
_UpperCAmelCase : Union[str, Any] = get_swin_config(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : str = SwinForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
model.eval()
_UpperCAmelCase : Optional[Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : List[Any] = ViTImageProcessor(size={"height": 192, "width": 192} )
_UpperCAmelCase : int = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
_UpperCAmelCase : str = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" )
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**SCREAMING_SNAKE_CASE__ ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCAmelCase : str = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
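# Minimal sketch of the fused-qkv split used in convert_state_dict above, with
# hypothetical shapes: a fused (3*dim, dim) projection is sliced into query /
# key / value blocks of dim rows each, exactly as the val[:dim], val[dim:2*dim],
# val[-dim:] slices do.
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)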
| 289
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 32
def A_ ( __lowercase , __lowercase = 16 ):
UpperCamelCase_ : int =AutoTokenizer.from_pretrained('bert-base-cased' )
UpperCamelCase_ : Optional[int] =load_dataset('glue' , 'mrpc' )
def tokenize_function(__lowercase ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase_ : Union[str, Any] =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase_ : Any =datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase_ : str =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase_ : List[Any] =1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase_ : int =16
elif accelerator.mixed_precision != "no":
UpperCamelCase_ : Optional[int] =8
else:
UpperCamelCase_ : Dict =None
return tokenizer.pad(
UpperCAmelCase__ , padding='longest' , max_length=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_tensors='pt' , )
# Instantiate dataloaders.
UpperCamelCase_ : str =DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
UpperCamelCase_ : int =DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase__ , collate_fn=UpperCAmelCase__ , batch_size=UpperCAmelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__SCREAMING_SNAKE_CASE = mocked_dataloaders # noqa: F811
def A_ ( __lowercase , __lowercase ):
if os.environ.get('TESTING_MOCKED_DATALOADERS' , UpperCAmelCase__ ) == "1":
UpperCamelCase_ : int =2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCamelCase_ : Optional[int] =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
UpperCamelCase_ : Any =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase_ : str =config['lr']
UpperCamelCase_ : List[str] =int(config['num_epochs'] )
UpperCamelCase_ : Union[str, Any] =int(config['seed'] )
UpperCamelCase_ : int =int(config['batch_size'] )
set_seed(UpperCAmelCase__ )
UpperCamelCase_ , UpperCamelCase_ : Dict =get_dataloaders(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_ : Optional[Any] =evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
UpperCamelCase_ : int =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCamelCase_ : List[Any] =batch_size // MAX_GPU_BATCH_SIZE
UpperCamelCase_ : int =MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase_ : Dict =AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=UpperCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase_ : int =model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase_ : Tuple =AdamW(params=model.parameters() , lr=UpperCAmelCase__ )
# Instantiate scheduler
UpperCamelCase_ : Any =get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(UpperCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : Any =accelerator.prepare(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCamelCase_ : int =os.path.split(UpperCAmelCase__ )[-1].split('.' )[0]
accelerator.init_trackers(UpperCAmelCase__ , UpperCAmelCase__ )
# Now we train the model
for epoch in range(UpperCAmelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCamelCase_ : int =0
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCamelCase_ : int =model(**UpperCAmelCase__ )
UpperCamelCase_ : List[str] =outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCamelCase_ : str =loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase_ : Optional[Any] =model(**UpperCAmelCase__ )
UpperCamelCase_ : Optional[Any] =outputs.logits.argmax(dim=-1 )
UpperCamelCase_ , UpperCamelCase_ : Union[str, Any] =accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=UpperCAmelCase__ , references=UpperCAmelCase__ , )
UpperCamelCase_ : List[str] =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(UpperCAmelCase__ ),
'epoch': epoch,
} , step=UpperCAmelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def A_ ( ):
UpperCamelCase_ : str =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=UpperCAmelCase__ , default='logs' , help='Location on where to store experiment tracking logs and relevant project information' , )
UpperCamelCase_ : Optional[int] =parser.parse_args()
UpperCamelCase_ : Optional[int] ={'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
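# Worked example of the gradient-accumulation arithmetic used above (assumed
# values, not taken from a real run): a requested batch size of 64 with
# MAX_GPU_BATCH_SIZE = 16 becomes 4 accumulation steps of 16 samples, so the
# effective batch size seen by the optimizer stays 64.
requested_batch_size, max_gpu_batch_size = 64, 16
gradient_accumulation_steps = requested_batch_size // max_gpu_batch_size
assert gradient_accumulation_steps * max_gpu_batch_size == requested_batch_size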
| 710
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__SCREAMING_SNAKE_CASE = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__SCREAMING_SNAKE_CASE = TaTokenizerFast
__SCREAMING_SNAKE_CASE = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
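# --- Illustration (sketch, hypothetical names, not part of the original
# module): `_LazyModule` above defers the heavy framework imports until an
# attribute is first accessed. The same idea, reduced to the standard
# library, looks roughly like this:
class _LazyAttrSketch:
    """Resolve an attribute to an object in another module on first access."""
    def __init__(self , attr_to_module ):
        self._attr_to_module = attr_to_module  # e.g. {'OrderedDict': 'collections'}
    def __getattr__(self , name ):
        import importlib
        module = importlib.import_module(self._attr_to_module[name] )  # imported only now
        return getattr(module , name )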
| 395
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowerCamelCase :
__a = PegasusConfig
__a = {}
__a = "gelu"
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=40 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=0 , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Dict= parent
SCREAMING_SNAKE_CASE__: str= batch_size
SCREAMING_SNAKE_CASE__: str= seq_length
SCREAMING_SNAKE_CASE__: int= is_training
SCREAMING_SNAKE_CASE__: Tuple= use_labels
SCREAMING_SNAKE_CASE__: str= vocab_size
SCREAMING_SNAKE_CASE__: Any= hidden_size
SCREAMING_SNAKE_CASE__: List[str]= num_hidden_layers
SCREAMING_SNAKE_CASE__: Tuple= num_attention_heads
SCREAMING_SNAKE_CASE__: Tuple= intermediate_size
SCREAMING_SNAKE_CASE__: List[Any]= hidden_dropout_prob
SCREAMING_SNAKE_CASE__: Optional[int]= attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__: Optional[int]= max_position_embeddings
SCREAMING_SNAKE_CASE__: int= eos_token_id
SCREAMING_SNAKE_CASE__: Any= pad_token_id
SCREAMING_SNAKE_CASE__: Union[str, Any]= bos_token_id
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: str= ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__: Tuple= tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__: Dict= tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__: str= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE__: Tuple= prepare_pegasus_inputs_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, inputs_dict
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Any= TFPegasusModel(config=lowerCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__: Dict= inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE__: List[Any]= input_ids[:1, :]
SCREAMING_SNAKE_CASE__: List[str]= inputs_dict['''attention_mask'''][:1, :]
SCREAMING_SNAKE_CASE__: str= inputs_dict['''head_mask''']
SCREAMING_SNAKE_CASE__: List[str]= 1
# first forward pass
SCREAMING_SNAKE_CASE__: List[Any]= model(lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__: Union[str, Any]= ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__: List[str]= tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE__: List[Any]= tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE__: Dict= tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE__: List[Any]= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE__: Optional[Any]= model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__: List[Any]= int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__: Union[str, Any]= output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__: Tuple= output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase , lowerCAmelCase , rtol=1e-3 )
def A__ ( snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : int=None , snake_case_ : Tuple=None , snake_case_ : Optional[int]=None , ):
if attention_mask is None:
SCREAMING_SNAKE_CASE__: str= tf.cast(tf.math.not_equal(snake_case_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__: Any= tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__: int= tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__: str= tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__: List[str]= tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
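# --- Illustration (sketch, standard TF ops only, not part of the original
# test): with pad_token_id = 0, the `not_equal`/`cast` recipe used above maps
# a toy batch [[5, 6, 0]] to the attention mask [[1, 1, 0]].
def _default_attention_mask_sketch():
    import tensorflow as tf  # local import mirrors the guarded import at the top of this file
    input_ids = tf.constant([[5, 6, 0]] )
    return tf.cast(tf.math.not_equal(input_ids , 0 ) , tf.int32 )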
@require_tf
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__a = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__a = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__a = True
__a = False
__a = False
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Tuple= TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE__: Optional[Any]= ConfigTester(self , config_class=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Any= self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
__a = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__a = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__a = "google/pegasus-xsum"
@cached_property
def UpperCamelCase_ ( self ) -> Dict:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: List[Any]= TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCamelCase_ ( self , **lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: str= self.translate_src_text(**lowerCAmelCase )
assert self.expected_text == generated_words
def UpperCamelCase_ ( self , **lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: Tuple= self.tokenizer(self.src_text , **lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE__: Tuple= self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Optional[Any]= self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase )
return generated_words
@slow
def UpperCamelCase_ ( self ) -> str:
self._assert_generated_batch_equal_expected()
| 64
|
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
    """simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("""/""" )
    target_model_path = args.target_model_path
    print(f'Load fine-pruned model from {model_name_or_path}' )
    model = torch.load(os.path.join(model_name_or_path , """pytorch_model.bin""" ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = ThresholdBinarizer.apply(scores , threshold , True )  # True -> apply sigmoid to the scores
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}' )
            else:
                raise ValueError("""Unknown pruning method""" )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f'bertarized_{os.path.basename(model_name_or_path )}' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f'\nCreated folder {target_model_path}' )
    torch.save(pruned_model , os.path.join(target_model_path , """pytorch_model.bin""" ) )
    print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. '''
'''Not needed for `l0`.'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder in which to save the pruned model. If not given, it is derived from `model_name_or_path`''',
)
args = parser.parse_args()
main(args)
| 480
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 507
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
@dataclass
class _lowerCamelCase :
_lowerCamelCase :nn.Module
_lowerCamelCase :List[nn.Module] = field(default_factory=a_ )
_lowerCamelCase :list = field(default_factory=a_ )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Tensor , UpperCamelCase : Tensor ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase , nn.Convad ) or isinstance(UpperCamelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCamelCase )
def __call__( self : int , UpperCamelCase : Tensor ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase )
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCamelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
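# --- Illustration (sketch, plain torch API, not part of the original script):
# the Tracker above relies on `register_forward_hook`; a minimal standalone
# version that records every leaf module executed during one forward pass
# looks like this. All names here are hypothetical.
def _trace_leaf_modules(model , x ):
    traced , handles = [] , []
    for m in model.modules():
        if len(list(m.children() ) ) == 0:  # leaf module: no submodules
            handles.append(m.register_forward_hook(lambda mod , inp , out : traced.append(mod ) ) )
    model(x )
    for h in handles:
        h.remove()  # always detach hooks so they don't leak into later passes
    return traced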
@dataclass
class _lowerCamelCase :
_lowerCamelCase :nn.Module
_lowerCamelCase :nn.Module
_lowerCamelCase :int = 0
_lowerCamelCase :List = field(default_factory=a_ )
_lowerCamelCase :List = field(default_factory=a_ )
def __call__( self : str , UpperCamelCase : Tensor ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = Tracker(self.dest )(UpperCamelCase ).parametrized
lowerCAmelCase__ : Union[str, Any] = Tracker(self.src )(UpperCamelCase ).parametrized
lowerCAmelCase__ : Any = list(filter(lambda UpperCamelCase : type(UpperCamelCase ) not in self.src_skip , UpperCamelCase ) )
lowerCAmelCase__ : int = list(filter(lambda UpperCamelCase : type(UpperCamelCase ) not in self.dest_skip , UpperCamelCase ) )
if len(UpperCamelCase ) != len(UpperCamelCase ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(UpperCamelCase )} operations while"""
f""" destination module has {len(UpperCamelCase )}.""" )
for dest_m, src_m in zip(UpperCamelCase , UpperCamelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> List[str]:
print(f"""Converting {name}...""" )
with torch.no_grad():
lowerCAmelCase__ : Any = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
lowerCAmelCase__ : int = ResNetForImageClassification(__UpperCAmelCase ).eval()
lowerCAmelCase__ : List[str] = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
lowerCAmelCase__ : str = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
lowerCAmelCase__ : int = f"""resnet{'-'.join(name.split('resnet' ) )}"""
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=__UpperCAmelCase , )
# we can use the convnext one
lowerCAmelCase__ : Tuple = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=__UpperCAmelCase , )
print(f"""Pushed {checkpoint_name}""" )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> List[str]:
lowerCAmelCase__ : Dict = """imagenet-1k-id2label.json"""
lowerCAmelCase__ : Any = 1000
lowerCAmelCase__ : Optional[int] = (1, num_labels)
lowerCAmelCase__ : List[Any] = """huggingface/label-files"""
lowerCAmelCase__ : int = num_labels
lowerCAmelCase__ : Any = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase__ : Optional[Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[int] = idalabel
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Union[str, Any] = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 507
| 1
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : Tuple=13 , lowerCamelCase : Dict=3 , lowerCamelCase : Tuple=True , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : Any=0.1 , lowerCamelCase : str=0.1 , lowerCamelCase : Union[str, Any]=224 , lowerCamelCase : str=1000 , lowerCamelCase : int=[3, 3, 6, 4] , lowerCamelCase : List[str]=[48, 56, 112, 220] , ) -> List[Any]:
__snake_case : Optional[int] = parent
__snake_case : Tuple = batch_size
__snake_case : Union[str, Any] = num_channels
__snake_case : Optional[Any] = is_training
__snake_case : List[str] = use_labels
__snake_case : int = hidden_dropout_prob
__snake_case : int = attention_probs_dropout_prob
__snake_case : List[str] = num_labels
__snake_case : Optional[Any] = image_size
__snake_case : Dict = layer_depths
__snake_case : List[Any] = embed_dims
def __snake_case ( self : Optional[int] ) -> Tuple:
__snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Union[str, Any] = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : str = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : List[str] ) -> Dict:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCamelCase , layer_scale_init_value=1E-5 , )
def __snake_case ( self : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] ) -> List[Any]:
__snake_case : Any = SwiftFormerModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Dict = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __snake_case ( self : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] ) -> List[Any]:
__snake_case : Dict = self.num_labels
__snake_case : Optional[Any] = SwiftFormerForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[Any] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__snake_case : List[Any] = SwiftFormerForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Any = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Optional[int] ) -> List[str]:
((__snake_case) , (__snake_case) , (__snake_case)) : List[Any] = self.prepare_config_and_inputs()
__snake_case : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : Any = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : Any = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : int = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Dict = False
def __snake_case ( self : Union[str, Any] ) -> Dict:
__snake_case : int = SwiftFormerModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(
self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __snake_case ( self : int ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def __snake_case ( self : str ) -> Tuple:
pass
def __snake_case ( self : Optional[Any] ) -> Tuple:
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(lowerCamelCase )
__snake_case : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def __snake_case ( self : Dict ) -> List[str]:
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : int = model_class(lowerCamelCase )
__snake_case : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[Any] = [*signature.parameters.keys()]
__snake_case : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : int ) -> Any:
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def __snake_case ( self : Optional[int] ) -> Union[str, Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Dict = SwiftFormerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def __snake_case ( self : Tuple ) -> Union[str, Any]:
pass
def __snake_case ( self : Optional[int] ) -> List[Any]:
def check_hidden_states_output(lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict ):
__snake_case : Any = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : Tuple = outputs.hidden_states
__snake_case : Optional[Any] = 8
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Any = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : List[Any] ) -> Union[str, Any]:
def _config_zero_init(lowerCamelCase : Optional[int] ):
__snake_case : List[str] = copy.deepcopy(lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCamelCase , lowerCamelCase , 1E-10 )
if isinstance(getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ) , lowerCamelCase ):
__snake_case : Optional[Any] = _config_zero_init(getattr(lowerCamelCase , lowerCamelCase ) )
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return configs_no_init
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Dict = _config_zero_init(lowerCamelCase )
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = model_class(config=lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case ( self : List[Any] ) -> Any:
pass
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class a (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : int ) -> Any:
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def __snake_case ( self : int ) -> Tuple:
__snake_case : Dict = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCamelCase )
__snake_case : Dict = self.default_image_processor
__snake_case : int = prepare_img()
__snake_case : Tuple = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__snake_case : Dict = model(**lowerCamelCase )
# verify the logits
__snake_case : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__snake_case : Tuple = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
| 81
|
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = (DPMSolverSDEScheduler,)
SCREAMING_SNAKE_CASE = 10
def _UpperCamelCase ( self ,**A ):
UpperCAmelCase = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**A )
return config
def _UpperCamelCase ( self ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A )
def _UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A ,beta_end=A )
def _UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A )
def _UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCAmelCase = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps ,device=A )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def _UpperCamelCase ( self ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**A ,use_karras_sigmas=A )
scheduler.set_timesteps(self.num_inference_steps ,device=A )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter.to(A ) * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(A )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(A ,A )
UpperCAmelCase = model(A ,A )
UpperCAmelCase = scheduler.step(A ,A ,A )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(A ) )
UpperCAmelCase = torch.mean(torch.abs(A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
| 341
| 0
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase__( unittest.TestCase):
def __init__( self: int , UpperCamelCase_: Tuple , UpperCamelCase_: Any=7 , UpperCamelCase_: str=3 , UpperCamelCase_: int=18 , UpperCamelCase_: Union[str, Any]=30 , UpperCamelCase_: List[Any]=4_00 , UpperCamelCase_: str=True , UpperCamelCase_: Dict=None , UpperCamelCase_: Tuple=True , ):
__lowerCamelCase = size if size is not None else {"""height""": 18, """width""": 18}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = do_normalize
def lowerCAmelCase__ ( self: Union[str, Any] ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase__( lowercase_ , unittest.TestCase):
UpperCAmelCase__ : Optional[Any] = ImageGPTImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = ImageGPTImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self: Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , """clusters""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_normalize""" ) )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(UpperCamelCase_ , """image_processor.json""" )
image_processor_first.to_json_file(UpperCamelCase_ )
__lowerCamelCase = self.image_processing_class.from_json_file(UpperCamelCase_ ).to_dict()
__lowerCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = self.image_processing_class.from_pretrained(UpperCamelCase_ ).to_dict()
__lowerCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCamelCase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCAmelCase__ ( self: Optional[int] ):
pass
def prepare_images( ):
    '''simple docstring'''
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
    image1 = Image.open(dataset[4]["""file"""] )
    image2 = Image.open(dataset[5]["""file"""] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class lowerCamelCase__( unittest.TestCase):
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
__lowerCamelCase = prepare_images()
# test non-batched
__lowerCamelCase = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
__lowerCamelCase = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCamelCase_ )
# test batched
__lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
__lowerCamelCase = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCamelCase_ )
| 719
|
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80
| 0
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
a =input("""Enter image url: """).strip()
print(F"""Downloading image from {url} ...""")
a =BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
a =soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
a =requests.get(image_url).content
a =F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 652
|
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
    """simple docstring"""
    @staticmethod
    def _should_log(main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self , level , msg , *args , **kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
        main_process_only = kwargs.pop("""main_process_only""" , True )
        in_order = kwargs.pop("""in_order""" , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger( name: str , log_level: str = None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get("""ACCELERATE_LOG_LEVEL""" , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
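# --- Illustration (sketch, not part of the original module): typical use of
# the adapter above. `Accelerator()` initializes the shared `PartialState`
# that `log` checks for; the two kwargs shown are exactly the ones the
# adapter pops before delegating to the standard `logging` machinery.
def _logging_usage_sketch():
    from accelerate import Accelerator
    accelerator = Accelerator()  # initializes PartialState for all ranks
    logger = get_logger(__name__ , log_level="""INFO""" )
    logger.info("""only rank 0 prints this""" , main_process_only=True )
    logger.info("""every rank prints, one at a time""" , main_process_only=False , in_order=True )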
| 513
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ ={"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_wavlm"""] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Tuple =IFInpaintingSuperResolutionPipeline
__a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
__a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __snake_case ( self ):
return self._get_superresolution_dummy_components()
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
if str(UpperCAmelCase_ ).startswith('''mps''' ):
lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __snake_case ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __snake_case ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __snake_case ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __snake_case ( self ):
self._test_save_load_local()
def __snake_case ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 33
| 1
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
snake_case = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
snake_case = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
snake_case = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
"""simple docstring"""
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) ,homepage='https://github.com/hendrycks/math' ,codebase_urls=['https://github.com/hendrycks/math'] ,)
def _compute( self , predictions , references ):
    n_correct = 0.0
    for pred, ref in zip(predictions , references ):
        n_correct += 1.0 if math_equivalence.is_equiv(pred , ref ) else 0.0
    accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 67
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf( model: BertModel , ckpt_dir: str , model_name: str ):
    """simple docstring"""
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name: str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return f"""bert/{name}"""
    def create_tf_var(tensor: np.ndarray , name: str , session: tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(f"""Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}""" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
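# --- Illustration (sketch, not part of the original script): the transpose
# above exists because torch.nn.Linear stores `weight` as
# (out_features, in_features) while a TF1 Dense kernel is
# (in_features, out_features); numerically the mapping is just a transpose.
def _torch_to_tf_kernel(weight ):
    return weight.T.numpy()  # (out, in) -> (in, out); assumes a CPU tensor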
def main( raw_args=None ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''' )
    parser.add_argument(
        '''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''' )
    parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''' )
    parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''' )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 262
| 0
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]

def mask_to_test_readable(mask):
    '''simple docstring'''
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
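# Example (values taken from the expected outputs in the tests below): for the
# 640x480 COCO test image, mask_to_test_readable(mask) returns something like
# {"hash": "115ad19f5f", "shape": (480, 640)}.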
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''')
    def test_small_model_tf(self):
pass
@slow
@require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline('''mask-generation''', model='''facebook/sam-vit-huge''')
        outputs = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''', points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks''']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8871}
] ,)
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = '''facebook/sam-vit-huge'''
        image_segmenter = pipeline('''mask-generation''', model=model_id)
        outputs = image_segmenter(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''', pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks''']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0053},
] ,)
| 633
|
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    '''simple docstring'''
    # Build an output projection whose weights are tied to the embedding matrix.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    '''simple docstring'''
    m2m_100 = torch.load(checkpoint_path, map_location='''cpu''')
    args = m2m_100['''args'''] or m2m_100['''cfg''']['''model''']
    state_dict = m2m_100['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 633
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 577
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
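    # Note: `_LazyModule` defers the torch-dependent `modeling_*` imports until an
    # attribute is first accessed, so importing the package itself stays cheap.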
| 518
| 0
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    '''simple docstring'''
    _keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__( self , prefix_length : int , prefix_inner_dim : int , prefix_hidden_dim : Optional[int] = None , vocab_size : int = 50257 , n_positions : int = 1024 , n_embd : int = 768 , n_layer : int = 12 , n_head : int = 12 , n_inner : Optional[int] = None , activation_function : str = "gelu_new" , resid_pdrop : float = 0.1 , embd_pdrop : float = 0.1 , attn_pdrop : float = 0.1 , layer_norm_epsilon : float = 1E-5 , initializer_range : float = 0.02 , scale_attn_weights : bool = True , use_cache : bool = True , scale_attn_by_inverse_layer_idx : bool = False , reorder_and_upcast_attn : bool = False , ):
"""simple docstring"""
super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
                f' `n_embd`: {n_embd} are not equal.')
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward( self , input_ids , prefix_embeds , attention_mask = None , labels = None , ):
        """simple docstring"""
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self , batch_size , device ):
        """simple docstring"""
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode( self , prefix ):
        """simple docstring"""
        return self.encode_prefix(prefix)
@torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ):
        """simple docstring"""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam( self , input_ids=None , input_embeds=None , device=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        """simple docstring"""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                # First step: seed the beams from the top-k next tokens.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Finished beams cannot extend: force their continuation score to -inf.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 712
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
        if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.weight')
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.bias')
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'down.{i}.block', "new": F'down_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'mid.block_{i}', "new": F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
        ]
        if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.weight'
            ]
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.bias'
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'up.{block_id}.block', "new": F'up_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'mid.block_{i}', "new": F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml")
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output diffusers model.""")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 207
| 0
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export( model , model_args : tuple , output_path : Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    """simple docstring"""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models( model_path : str , output_path : str , opset : int , fp16 : bool = False ):
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = """cuda"""
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""")
    else:
        device = """cpu"""
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + """/vae""")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / """vae_decoder""" / """model.onnx""", ordered_input_names=["""latent_sample""", """return_dict"""], output_names=["""sample"""], dynamic_axes={
            """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
        }, opset=opset, )
del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
| 15
|
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    # Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] in place.
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
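# Example: iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) -> [1, 2, 5, 7, 7, 8, 9].
# The merge width p doubles each pass (2, 4, 8, ...) until it spans the whole list,
# which is the bottom-up counterpart of recursive merge sort.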
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
| 224
| 0
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__( self , * ,
        clip_extra_context_tokens : int = 4 , clip_embeddings_dim : int = 768 , time_embed_dim : int , cross_attention_dim , ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward( self , * , image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        # (batch, dim * n_tokens) -> (batch, n_tokens, cross_attention_dim) after reshape + permute
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
| 709
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # `sqlite_path`, `tmp_path` and `set_sqlalchemy_silence_uber_warning` are pytest fixtures
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            '''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute('''SELECT * FROM dataset''')
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0).write()
| 332
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
    def test_alt_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components['text_encoder'] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['prompt'] = 'A photo of an astronaut'
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_alt_diffusion_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components['text_encoder'] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion', safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained('BAAI/AltDiffusion', subfolder='scheduler')
        alt_pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion', scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type='numpy')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 27
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 697
| 0
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'CLIPConfigMixin',
    'DecisionTransformerConfigMixin',
    'EncoderDecoderConfigMixin',
    'RagConfigMixin',
    'SpeechEncoderDecoderConfigMixin',
    'VisionEncoderDecoderConfigMixin',
    'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints():
    """simple docstring"""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 84
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<unk>''')
        self.assertEqual(vocab_keys[1], '''<s>''')
        self.assertEqual(vocab_keys[-1], '''j''')
        self.assertEqual(len(vocab_keys), 1_000)
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='''max_length''')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='''max_length''')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='''max_length''', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='''max_length''')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='''max_length''')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='''max_length''', )
    def test_padding_different_model_input_name(self):
        """simple docstring"""
        pass
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        """simple docstring"""
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''')
@slow
    def test_tokenization_base_easy_symbols(self):
        """simple docstring"""
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        """simple docstring"""
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        """simple docstring"""
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ''' '''.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='''pt''')
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='''pt''')
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence['''input_ids'''].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        # fmt: off
        expected_encoding = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/reformer-crime-and-punishment", revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a", padding=False, sequences=sequences, )
| 84
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
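
# A minimal usage sketch (editor's addition, not from the original file): the defaults above
# reproduce the yjernite/retribert-base-uncased architecture; any field can be overridden by keyword.
# config = RetriBertConfig(projection_dim=256)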
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequences = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
    def test_split_by_punct(self):
        # fmt: off
        sequences = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequences = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequences = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequences = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequences = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequences, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowercase = {"""input_ids""": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowercase, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670", )
| 566
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
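
# Editor's sketch of how this pattern behaves (the real _LazyModule lives in transformers.utils):
# attribute access on the replaced module object triggers the submodule import on first use, so
#   from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
# loads processing_wav2vec2_with_lm lazily instead of at package import time.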
| 713
|
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''')
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''')
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
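
# Worked example (editor's addition; doping values are illustrative, not from the original file).
# The function evaluates V_bi = (kT / q) * ln(N_d * N_a / n_i^2); for a silicon junction at
# T = 300 K with N_d = N_a = 1e17 cm^-3 and n_i = 1.41e10 cm^-3 this gives roughly 0.82 V:
# >>> round(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.41e10), 2)
# 0.82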
| 101
| 0
|
"""simple docstring"""
def solution(n: int = 600_851_475_143) -> int:
    """Returns the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
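
# Quick sanity checks (editor's addition): 13195 = 5 * 7 * 13 * 29, so solution(13195) == 29,
# and for the default n = 600_851_475_143 the function returns 6857.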
| 584
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
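
# Example invocation (editor's addition; all paths and the script filename are placeholders):
# python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path luke_base.bin --metadata_path metadata.json \
#     --entity_vocab_path entity_vocab.tsv --pytorch_dump_folder_path ./converted-luke --model_size base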
| 584
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs, ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 85
|
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
if n_term == "":
return []
lowercase__ : list= []
for temp in range(int(A ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
    nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
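
# Example (editor's addition): harmonic_series("5") returns ['1', '1/2', '1/3', '1/4', '1/5'].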
| 85
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
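
# Usage sketch (editor's addition; the checkpoint name is illustrative, any UNet2DModel works):
# from diffusers import KarrasVeScheduler, UNet2DModel
# unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
# pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
# image = pipe(batch_size=1, num_inference_steps=50).images[0]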
| 681
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
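
# Usage sketch (editor's addition; `params` is assumed to expose max_model_input_size, mlm,
# special_tok_ids and is_master, as referenced above):
# dataset = LmSeqsDataset(params=params, data=token_id_sequences)
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)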
| 0
| 0
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 704
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Dict = True
__A : int = LlamaModel(_A )
model.to(_A )
model.eval()
__A : str = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A : int = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
__A : List[Any] = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : Optional[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
__A : List[Any] = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : int = True
__A : List[Any] = True
__A : List[Any] = LlamaForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
__A : Optional[Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
__A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__A : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : str = torch.cat([input_mask, next_mask] , dim=-1 )
__A : Tuple = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0]
__A : Union[str, Any] = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
__A : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def UpperCAmelCase_ ( self ):
pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
__A : Union[str, Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__A : Optional[int] = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : str = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : int = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[str] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
__A : int = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : List[str] = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : Tuple = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
__A : Optional[int] = model(torch.tensor(_A ) )
# Expected mean on dim = -1
__A : List[str] = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__A : Optional[Any] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def UpperCAmelCase_ ( self ):
__A : str = [1, 306, 4658, 278, 6593, 310, 2834, 338]
__A : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
__A : List[Any] = model(torch.tensor(_A ) )
__A : Tuple = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _A , atol=1e-2 , rtol=1e-2 )
# fmt: off
__A : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def UpperCAmelCase_ ( self ):
__A : Tuple = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
__A : List[str] = 'Simply put, the theory of relativity states that '
__A : Union[str, Any] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
__A : List[str] = tokenizer.encode(_A , return_tensors='pt' )
__A : Tuple = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_A )
# greedy generation outputs
__A : Union[str, Any] = model.generate(_A , max_new_tokens=64 , top_p=_A , temperature=1 , do_sample=_A )
__A : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
| 77
| 0
|
'''simple docstring'''
import unittest
from transformers import DonutProcessor
a = "naver-clova-ix/donut-base"
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DonutProcessor.from_pretrained(lowerCamelCase )
    def test_token2json(self):
        expected_json = {
"""name""": """John Doe""",
"""age""": """99""",
"""city""": """Atlanta""",
"""state""": """GA""",
"""zip""": """30301""",
"""phone""": """123-4567""",
"""nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
}
        sequence = (
"""<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
"""<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
"""<s_nicknames><s_nickname>Johnny</s_nickname>"""
"""<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
)
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 109
|
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: T5FilmDecoder, scheduler: DDPMScheduler, melgan: OnnxRuntimeModel if is_onnx_available() else Any, ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to network outputs range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
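
    # Worked example (editor's addition, illustrative numbers): with min_value = log(1e-5) ≈ -11.51
    # and max_value = 4.0, a feature at 4.0 maps to 1.0 in [0, 1] and then to max_out; for any x in
    # range, scale_to_features(scale_features(x)) == x, since each map is the other's exact inverse.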
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def snake_case ( self: Optional[int] ,a: int ,a: str ,a: Dict ):
__UpperCAmelCase = noise_time
if not torch.is_tensor(a ):
__UpperCAmelCase = torch.tensor([timesteps] ,dtype=torch.long ,device=input_tokens.device )
elif torch.is_tensor(a ) and len(timesteps.shape ) == 0:
__UpperCAmelCase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase = timesteps * torch.ones(input_tokens.shape[0] ,dtype=timesteps.dtype ,device=timesteps.device )
__UpperCAmelCase = self.decoder(
encodings_and_masks=a ,decoder_input_tokens=a ,decoder_noise_time=a )
return logits
@torch.no_grad()
def __call__( self: str ,a: List[List[int]] ,a: Optional[torch.Generator] = None ,a: int = 100 ,a: bool = True ,a: str = "numpy" ,a: Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,a: int = 1 ,):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a ,a ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a )}.""" )
__UpperCAmelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] ,dtype=np.floataa )
__UpperCAmelCase = np.zeros([1, 0, self.n_dims] ,np.floataa )
__UpperCAmelCase = torch.ones((1, TARGET_FEATURE_LENGTH) ,dtype=a ,device=self.device )
for i, encoder_input_tokens in enumerate(a ):
if i == 0:
__UpperCAmelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device ,dtype=self.decoder.dtype )
# The first chunk has no previous context.
__UpperCAmelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) ,dtype=a ,device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__UpperCAmelCase = ones
__UpperCAmelCase = self.scale_features(
a ,output_range=[-1.0, 1.0] ,clip=a )
__UpperCAmelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) ,continuous_inputs=a ,continuous_mask=a ,)
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__UpperCAmelCase = randn_tensor(
shape=encoder_continuous_inputs.shape ,generator=a ,device=self.device ,dtype=self.decoder.dtype ,)
# set step values
self.scheduler.set_timesteps(a )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCAmelCase = self.decode(
encodings_and_masks=a ,input_tokens=a ,noise_time=t / self.scheduler.config.num_train_timesteps ,)
# Compute previous output: x_t -> x_t-1
__UpperCAmelCase = self.scheduler.step(a ,a ,a ,generator=a ).prev_sample
__UpperCAmelCase = self.scale_to_features(a ,input_range=[-1.0, 1.0] )
__UpperCAmelCase = mel[:1]
__UpperCAmelCase = mel.cpu().float().numpy()
__UpperCAmelCase = np.concatenate([full_pred_mel, pred_mel[:1]] ,axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a ,a )
logger.info('Generated segment' ,a )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
if output_type == "numpy":
__UpperCAmelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__UpperCAmelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=a )
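
# A minimal usage sketch (illustrative comments, not part of the pipeline file).
# It assumes the "google/music-spectrogram-diffusion" checkpoint and a MIDI
# processor that turns a file into note-token chunks; adapt to your setup.
# from diffusers import SpectrogramDiffusionPipeline, MidiProcessor
#
# pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
# processor = MidiProcessor()
# output = pipe(processor("beethoven_hammerklavier_2.mid"))
# audio = output.audios[0]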
| 396
| 0
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_UpperCAmelCase : List[Any] = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
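
# Worked example (hypothetical pytest summary line, not from the original script):
# handle_test_results("= 2 failed, 10 passed in 0:01:02 =") -> (2, 10, "0:01:02").
# The counts come from the token preceding each keyword, and the time is the
# second-to-last token because the short-output line is wrapped in "=" signs.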
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = get_job_links()
_UpperCAmelCase : List[str] = retrieve_available_artifacts()
_UpperCAmelCase : Optional[int] = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_UpperCAmelCase : Tuple = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_UpperCAmelCase : Any = github_actions_job_links.get("run_doctests")
_UpperCAmelCase : Optional[Any] = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_UpperCAmelCase : Optional[Any] = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_UpperCAmelCase : int = handle_test_results(artifact["stats"])
_UpperCAmelCase : int = failed
_UpperCAmelCase : Optional[int] = success
_UpperCAmelCase : Optional[Any] = time_spent[1:-1] + ", "
_UpperCAmelCase : Dict = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_UpperCAmelCase : Union[str, Any] = line.replace("FAILED ", "")
_UpperCAmelCase : List[str] = line.split()[0].replace("\n", "")
if "::" in line:
_UpperCAmelCase : int = line.split("::")
else:
_UpperCAmelCase : List[str] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_UpperCAmelCase : Any = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_UpperCAmelCase : Optional[Any] = all_failures[test] if test in all_failures else "N/A"
_UpperCAmelCase : Dict = failure
break
_UpperCAmelCase : Any = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 702
|
_UpperCAmelCase : Union[str, Any] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 453
| 0
|
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function of `num` by numerical integration."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    """Integrand of the gamma function: x^(z-1) * e^(-x)."""
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
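    # Illustrative check (not in the original file): Gamma(5) = 4! = 24.
    print(f"gamma(5) = {gamma(5):.3f}")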
| 303
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=16 , lowercase__=2 , lowercase__=4 , lowercase__=4 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=32 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=0.02 , ) -> Union[str, Any]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = eos_token_id
__UpperCAmelCase = pad_token_id
__UpperCAmelCase = bos_token_id
__UpperCAmelCase = initializer_range
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__UpperCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__UpperCAmelCase = shift_tokens_right(lowercase__ , 1 , 2 )
__UpperCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase__ , )
__UpperCAmelCase = prepare_blenderbot_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
return config, inputs_dict
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase , __UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> Tuple:
__UpperCAmelCase = 20
__UpperCAmelCase = model_class_name(lowercase__ )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
__UpperCAmelCase , __UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ )
__UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , )
__UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase__ , )
__UpperCAmelCase = model.decode(lowercase__ , lowercase__ )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
__UpperCAmelCase = 20
__UpperCAmelCase = model_class_name(lowercase__ )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
__UpperCAmelCase , __UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , )
__UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase__ , decoder_position_ids=lowercase__ , )
__UpperCAmelCase = model.decode(lowercase__ , lowercase__ , decoder_attention_mask=lowercase__ )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__UpperCAmelCase = input_ids.shape[0]
__UpperCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_config_and_data()
__UpperCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(lowercase__ )
__UpperCAmelCase = lm_model(input_ids=lowercase__ )
__UpperCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowercase__ )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__UpperCAmelCase = FlaxBlenderbotSmallForConditionalGeneration(lowercase__ )
__UpperCAmelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__UpperCAmelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__UpperCAmelCase = lm_model(input_ids=lowercase__ , decoder_input_ids=lowercase__ )
__UpperCAmelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowercase__ )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__UpperCAmelCase = shift_tokens_right(lowercase__ , 1 , 2 )
__UpperCAmelCase = np.equal(lowercase__ , 1 ).astype(np.floataa ).sum()
__UpperCAmelCase = np.equal(lowercase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowercase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = self._prepare_for_class(lowercase__ , lowercase__ )
__UpperCAmelCase = model_class(lowercase__ )
@jax.jit
def encode_jitted(lowercase__ , lowercase__=None , **lowercase__ ):
return model.encode(input_ids=lowercase__ , attention_mask=lowercase__ )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = encode_jitted(**lowercase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = encode_jitted(**lowercase__ ).to_tuple()
self.assertEqual(len(lowercase__ ) , len(lowercase__ ) )
for jitted_output, output in zip(lowercase__ , lowercase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__UpperCAmelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase__ , lowercase__ , lowercase__ ):
return model.decode(
decoder_input_ids=lowercase__ , decoder_attention_mask=lowercase__ , encoder_outputs=lowercase__ , )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = decode_jitted(**lowercase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = decode_jitted(**lowercase__ ).to_tuple()
self.assertEqual(len(lowercase__ ) , len(lowercase__ ) )
for jitted_output, output in zip(lowercase__ , lowercase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase_ (self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCAmelCase = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__UpperCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id
__UpperCAmelCase = model(lowercase__ )
self.assertIsNotNone(lowercase__ )
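
# A minimal generation sketch for the checkpoint used above (illustrative comments,
# not part of the test suite; requires downloading the model from the Hub):
# from transformers import BlenderbotSmallTokenizer
# tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
# model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
# inputs = tokenizer(["sam is a great name. it means 'listener'."], return_tensors="np")
# ids = model.generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences
# print(tokenizer.batch_decode(ids, skip_special_tokens=True))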
| 303
| 1
|
def and_gate(input_1: int, input_2: int) -> int:
    # The output is 1 only if neither input is 0.
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 714
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = KandinskyVaaImgaImgPipeline
_a = ["image_embeds", "negative_image_embeds", "image"]
_a = [
"image_embeds",
"negative_image_embeds",
"image",
]
_a = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_a = False
@property
def a__ ( self ) -> int:
return 32
@property
def a__ ( self ) -> Union[str, Any]:
return 32
@property
def a__ ( self ) -> List[str]:
return self.time_input_dim
@property
def a__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def a__ ( self ) -> str:
return 100
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_A : Union[str, Any] = UNetaDConditionModel(**_a )
return model
@property
def a__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a__ ( self ) -> Tuple:
torch.manual_seed(0 )
_A : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a__ ( self ) -> int:
_A : Any = self.dummy_unet
_A : List[Any] = self.dummy_movq
_A : str = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_A : int = DDIMScheduler(**_a )
_A : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a__ ( self , _a , _a=0 ) -> str:
_A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_A : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_A : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
_A : Tuple = torch.manual_seed(_a )
else:
_A : str = torch.Generator(device=_a ).manual_seed(_a )
_A : Optional[Any] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def a__ ( self ) -> Union[str, Any]:
_A : Dict = """cpu"""
_A : int = self.get_dummy_components()
_A : Optional[int] = self.pipeline_class(**_a )
_A : Any = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_A : List[Any] = pipe(**self.get_dummy_inputs(_a ) )
_A : Dict = output.images
_A : List[str] = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_A : Dict = image[0, -3:, -3:, -1]
_A : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A : Optional[int] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self ) -> List[str]:
_A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_A : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_A : Dict = """A red cartoon frog, 4k"""
_A : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_A : int = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_A : Dict = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_A : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A , _A : List[str] = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_A : int = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_A : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 54
| 0
|
class MaxFenwickTree:
    def __init__(self, size):
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # interval maxima

    @staticmethod
    def get_next(index):
        return index | (index + 1)

    @staticmethod
    def get_prev(index):
        return (index & (index + 1)) - 1

    def update(self, index, value):
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Recompute this node's interval maximum from the already-maintained
                # prefix [current_left_border, index) plus the new value.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left, right):
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
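    # Illustrative usage (not in the original): point updates + a range-max query.
    tree = MaxFenwickTree(5)
    tree.update(0, 10)
    tree.update(3, 7)
    print(tree.query(0, 4))  # -> 10, the maximum over indices [0, 4)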
| 130
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__A : Optional[Any] = "sshleifer/bart-tiny-random"
__A : Dict = "patrickvonplaten/t5-tiny-random"
@require_torch
class A_ (unittest.TestCase ):
@cached_property
def _lowercase ( self ):
'''simple docstring'''
return AutoConfig.from_pretrained(_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , *UpperCAmelCase = create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , *UpperCAmelCase = create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=1 , d=_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , *UpperCAmelCase = create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=1 , d=_A )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase , *UpperCAmelCase = create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowercase ( self ):
'''simple docstring'''
with self.assertRaises(_A ):
create_student_by_copying_alternating_layers(_A , tempfile.mkdtemp() , e=_A , d=_A )
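
# A minimal usage sketch (illustrative; `make_student.py` lives next to this test in
# the seq2seq-distillation example, and the output directory name is arbitrary):
# student, *_ = create_student_by_copying_alternating_layers(TINY_BART, "student_dir", e=1, d=1)
# print(student.config.encoder_layers, student.config.decoder_layers)  # -> 1 1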
| 130
| 1
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 712
|
"""simple docstring"""
class SubArray:
    def __init__(self, arr):
        # Turn the comma-separated input string into a list of number strings.
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane-style DP: sum_value[i] is the best sum of a subarray ending at i,
        # rear[i] is the best sum seen anywhere up to index i.
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
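
# Worked example (not in the original): for "1,-2,3,4" the best contiguous
# sum is 3 + 4 = 7, so SubArray("1,-2,3,4").solve_sub_array() returns 7.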
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
print(('''the results is:''', re))
| 296
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 311
|
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
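
# A minimal standalone sketch (not part of the test suite): construct the
# scheduler directly and inspect its discretized timesteps.
# from diffusers import KDPM2DiscreteScheduler
# sched = KDPM2DiscreteScheduler(num_train_timesteps=1000)
# sched.set_timesteps(num_inference_steps=10)
# print(sched.timesteps)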
| 311
| 1
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
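# Example invocation (hypothetical paths; --tok_name must be a real checkpoint id).
# Each split is expected as line-aligned {split}.source / {split}.target text files:
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed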
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
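# Minimal usage sketch (downloads the hosted files referenced above; the comment on
# token_type_ids follows from create_token_type_ids_from_sequences, not a recorded run):
#
#   from transformers import MobileBertTokenizerFast
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   enc = tok("hello world", "how are you?")
#   # token_type_ids: 0 for "[CLS] hello world [SEP]", 1 for "how are you? [SEP]"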
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
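# Minimal inference sketch mirroring the integration tests above (same checkpoint id;
# post_process_semantic_segmentation is the standard Mask2Former image-processor helper):
#
#   model = Mask2FormerForUniversalSegmentation.from_pretrained(
#       "facebook/mask2former-swin-small-coco-instance"
#   ).eval()
#   image_processor = Mask2FormerImageProcessor.from_pretrained(
#       "facebook/mask2former-swin-small-coco-instance"
#   )
#   inputs = image_processor(prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   seg_map = image_processor.post_process_semantic_segmentation(outputs)[0]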
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
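# Usage sketch: instantiating the config with its defaults yields the base architecture
# (12 layers, hidden size 768, 16x16 patches on 224x224 images):
#
#   config = Data2VecVisionConfig()
#   assert config.num_hidden_layers == 12 and config.patch_size == 16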
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[str, List[str], None] = None,
        ignore_files: Union[str, List[str]] = None,
        only_modules: bool = True,
    ) -> None:
        """Runs through the specific directory, looking for the files identified with `identifier`,
        and executes the doctests in those files."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_files_without_identifiers_doctests(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_documentation_doctests(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
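# For reference, the two doctest entry points used above behave like this
# (module/file names here are illustrative only):
#
#   suite = doctest.DocTestSuite(transformers.modeling_utils)  # docstring examples as a unittest suite
#   result = doctest.testfile("docs/source/quicktour.rst", optionflags=doctest.ELLIPSIS)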
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"""Rectangle: {area_rectangle(1_0, 2_0) = }""")
print(f"""Square: {area_square(1_0) = }""")
print(f"""Triangle: {area_triangle(1_0, 1_0) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""")
print(f"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""")
print(f"""Rhombus: {area_rhombus(1_0, 2_0) = }""")
print(f"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""")
print(f"""Circle: {area_circle(2_0) = }""")
print(f"""Ellipse: {area_ellipse(1_0, 2_0) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(f"""Cube: {surface_area_cube(2_0) = }""")
print(f"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""")
print(f"""Sphere: {surface_area_sphere(2_0) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(2_0) = }""")
print(f"""Cone: {surface_area_cone(1_0, 2_0) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""")
print(f"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""")
print(f"""Torus: {surface_area_torus(2_0, 1_0) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""")
print(f"""Square: {area_reg_polygon(4, 1_0) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 1_0) = }""")
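    # Quick sanity check for Heron's formula: a 3-4-5 right triangle has
    # semi-perimeter 6 and area sqrt(6*3*2*1) = 6.0, matching (3*4)/2.
    print(f"""Right triangle check: {area_triangle_three_sides(3, 4, 5) = }""")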
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
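# With this pattern, importing the package stays cheap: submodules are only loaded when
# an attribute is first accessed, e.g. (sketch):
#
#   from transformers.models.poolformer import PoolFormerConfig  # triggers the lazy import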
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
UpperCAmelCase : Tuple = "Unicode €."
UpperCAmelCase : int = tokenizer(snake_case )
UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded["input_ids"] , snake_case )
# decoding
UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded["input_ids"] , snake_case )
# decoding
UpperCAmelCase : Dict = tokenizer.decode(snake_case )
self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.perceiver_tokenizer
UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
self.assertIsInstance(snake_case , snake_case )
if FRAMEWORK != "jax":
UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
else:
UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(snake_case , snake_case )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.perceiver_tokenizer
UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , snake_case )
self.assertIn("attention_mask" , snake_case )
self.assertNotIn("decoder_input_ids" , snake_case )
self.assertNotIn("decoder_attention_mask" , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.perceiver_tokenizer
UpperCAmelCase : int = [
"Summary of the text.",
"Another summary.",
]
UpperCAmelCase : List[Any] = tokenizer(
text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
UpperCAmelCase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase : Dict = tempfile.mkdtemp()
UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
tokenizer.save_pretrained(snake_case )
UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
shutil.rmtree(snake_case )
UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase : str = tempfile.mkdtemp()
UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
UpperCAmelCase : int = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
tokenizer.save_pretrained(snake_case )
UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(snake_case )
with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase : Union[str, Any] = json.load(snake_case )
with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase : Any = json.load(snake_case )
UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
"an_additional_special_token"
]
UpperCAmelCase : List[str] = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case , snake_case )
with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case , snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
snake_case , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
snake_case , additional_special_tokens=snake_case , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
self.assertIsInstance(snake_case , snake_case )
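# The expected ids in the tests above follow from the Perceiver tokenizer being
# byte-level with 6 special tokens prepended to the vocabulary: each UTF-8 byte b
# maps to id b + 6. For example ord("U") == 85, hence the 91 in the "Unicode €."
# test, and "€" (3 UTF-8 bytes: 0xE2 0x82 0xAC) becomes 232, 136, 178.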
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedfileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedfileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedfileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedfileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedfileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
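# Usage sketch (assumes a local "data.txt.gz" exists): plain fsspec can already open
# compressed files directly, which is what these filesystems build on:
#
#   import fsspec
#   with fsspec.open("data.txt.gz", mode="rt", compression="gzip") as f:
#       text = f.read()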
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
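    # Worked example of the mass-action law used above (n * p = n_i**2):
    # with hole_conc = 1e17 and intrinsic_conc = 1.5e10,
    #   carrier_concentration(electron_conc=0, hole_conc=1e17, intrinsic_conc=1.5e10)
    # returns ("electron_conc", 2250.0) since (1.5e10)**2 / 1e17 == 2250.0.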
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
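# Quick sanity check for entropy() above: a uniform distribution over four outcomes
# has entropy ln(4) ≈ 1.3863, e.g.
#
#   entropy(torch.tensor([0.25, 0.25, 0.25, 0.25]))  # -> tensor(1.3863)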
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, lm_logits, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) iteratively, based on head importance scores,
    until the score drops below the masking threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and compare timing and score
    against masking alone."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
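
# A minimal, self-contained sketch of the API the pruning step above relies on:
# `model.prune_heads` is the standard transformers method that drops attention heads
# from the weight matrices in place. Assumes the stock "gpt2" checkpoint is reachable;
# the layer/head choices below are illustrative, not values used by this script.
from transformers import GPT2LMHeadModel as _DemoGPT2LMHeadModel

_demo_model = _DemoGPT2LMHeadModel.from_pretrained('gpt2' )
_params_before = sum(p.numel() for p in _demo_model.parameters() )
_demo_model.prune_heads({0: [0, 1], 1: [2]} )  # mapping: layer index -> heads to remove
_params_after = sum(p.numel() for p in _demo_model.parameters() )
print('Pruning demo: %.2e -> %.2e parameters' % (_params_before, _params_after) )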
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor ):
    """
    Constructs a speech feature extractor that computes log-mel filterbank (MFSC) features from raw audio.
    """

    model_input_names = ["input_features", "attention_mask"]
    def __init__(self , feature_size=80 , sampling_rate=16_000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=32_768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self , one_waveform: np.array ) ->np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="log" , )
        return msfc_features.T
    def _normalize_one(self , x , input_length , padding_value ):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x

    def normalize(self , input_features: List[np.ndarray] , attention_mask: Optional[np.ndarray] = None ) ->List[np.ndarray]:
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__(self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , padding: Union[bool, str, PaddingStrategy] = False , max_length: Optional[int] = None , truncation: bool = False , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_tensors: Optional[Union[str, TensorType]] = None , sampling_rate: Optional[int] = None , **kwargs , ) ->BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract MFSC features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features} )

        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" )
        if isinstance(input_features[0] , list ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"] , attention_mask=attention_mask )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )

        return padded_inputs
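
# A minimal usage sketch for the extractor above (names follow the fixed class
# definition; the waveform is synthetic). A one-second mono signal at 16 kHz with a
# 10 ms hop yields roughly 100 frames of `feature_size` mel channels each.
_demo_extractor = MCTCTFeatureExtractor(feature_size=80 , sampling_rate=16_000 , padding_value=0.0 )
_demo_waveform = np.zeros(16_000 , dtype=np.float32 )  # one second of silence
_demo_inputs = _demo_extractor(_demo_waveform , sampling_rate=16_000 , padding="longest" , return_tensors="np" )
print(_demo_inputs["input_features"].shape )  # (batch, frames, feature_size)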
import torch
def main():
    """Report how many GPUs PyTorch can see."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'Successfully ran on {num_gpus} GPUs' )


if __name__ == "__main__":
    main()
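
# A small extension of the check above (hedged sketch): list each visible device.
# `torch.cuda.get_device_name` is the standard API; the output depends on the host.
if torch.cuda.is_available():
    for _device_index in range(torch.cuda.device_count() ):
        print(F'device {_device_index}: {torch.cuda.get_device_name(_device_index )}' )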
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self , **kwargs ):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs )
        return config
    def check_over_configs(self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self ):
        pass
    def check_over_forward(self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )

        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample

        return sample
    def test_step_shape(self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_prk(residual , 1 , sample , **kwargs ).prev_sample

            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )

            output_0 = scheduler.step_plms(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_plms(residual , 1 , sample , **kwargs ).prev_sample

            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps(self ):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_steps_offset(self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )

    def test_betas(self ):
        for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules(self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type(self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices(self ):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t )

    def test_inference_steps(self ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps )

    def test_pow_of_3_inference_steps(self ):
        # earlier version of set_timesteps() caused an error indexing alphas with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                sample = scheduler.step_prk(residual , t , sample ).prev_sample

    def test_inference_plms_no_past_residuals(self ):
        with self.assertRaises(ValueError ):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )

            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample

    def test_full_loop_no_noise(self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 198.1_318 ) < 1e-2
        assert abs(result_mean.item() - 0.2_580 ) < 1e-3

    def test_full_loop_with_v_prediction(self ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 67.3_986 ) < 1e-2
        assert abs(result_mean.item() - 0.0_878 ) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 230.0_399 ) < 1e-2
        assert abs(result_mean.item() - 0.2_995 ) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 186.9_482 ) < 1e-2
        assert abs(result_mean.item() - 0.2_434 ) < 1e-3
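
# A hedged usage sketch of the denoising loop the tests above exercise. The UNet is
# stubbed with zeros so the snippet stays self-contained; a real pipeline would call
# an actual model where the stand-in comment indicates.
_demo_scheduler = PNDMScheduler(num_train_timesteps=1_000 )
_demo_scheduler.set_timesteps(10 )
_demo_sample = torch.zeros(1 , 3 , 8 , 8 )
for _demo_t in _demo_scheduler.timesteps:
    _demo_residual = torch.zeros_like(_demo_sample )  # stand-in for model(sample, t)
    _demo_sample = _demo_scheduler.step(_demo_residual , _demo_t , _demo_sample ).prev_sample
print(_demo_sample.shape )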
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name , data_dir , max_source_length=1_024 , max_target_length=1_024 , consider_target=False , **kwargs ):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
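
# Usage sketch (hedged): `fire.Fire` turns every parameter of `save_len_file` into a
# CLI argument. The tokenizer name and data path below are illustrative only.
#
#   python save_len_file.py t5-small /path/to/data_dir \
#       --max_source_length 512 --max_target_length 128 --consider_target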
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin ):
    """
    This is a general feature extraction class for speech recognition.
    """

    def __init__(self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side" , "right" )
        self.return_attention_mask = kwargs.pop("return_attention_mask" , True )

        super().__init__(**kwargs )
    def pad(
        self ,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ] ,
        padding: Union[bool, str, PaddingStrategy] = True ,
        max_length: Optional[int] = None ,
        truncation: bool = False ,
        pad_to_multiple_of: Optional[int] = None ,
        return_attention_mask: Optional[bool] = None ,
        return_tensors: Optional[Union[str, TensorType]] = None ,
    ) ->BatchFeature:
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys() )}" )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = "tf"
            elif is_torch_tensor(first_element ):
                return_tensors = "pt"
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element )}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object." )

        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError("Some items in the output dictionary have a different batch size than others." )

        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )

        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad(self , processed_features: Union[Dict[str, np.ndarray], BatchFeature] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ) ->dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input )

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input ) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input ) , dtype=np.int32 )

        if needs_to_be_padded:
            difference = max_length - len(required_input )

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (0, difference) )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"] , (difference, 0) )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input , padding_shape , "constant" , constant_values=self.padding_value )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )

        return processed_features
    def _truncate(self , processed_features: Union[Dict[str, np.ndarray], BatchFeature] , max_length: Optional[int] = None , pad_to_multiple_of: Optional[int] = None , truncation: Optional[bool] = None , ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input ) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self , padding=False , max_length=None ):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )

        return padding_strategy
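
# A numeric sketch of the padding arithmetic `_pad` applies (hedged illustration):
# with `feature_size > 1` and right padding, `difference` all-`padding_value` frames
# are appended along the time axis.
_demo_features = np.ones((3, 4) , dtype=np.float32 )  # 3 frames, 4 feature channels
_demo_difference = 5 - _demo_features.shape[0]  # pad 3 frames up to max_length=5
_demo_padded = np.pad(_demo_features , ((0, _demo_difference), (0, 0)) , "constant" , constant_values=0.0 )
print(_demo_padded.shape )  # (5, 4)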
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8000,
"sample_size": 13_1072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
}
def alpha_sigma_to_t(alpha , sigma ):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma , alpha ) / math.pi * 2


def get_crash_schedule(t ):
    """Maps a uniform time in [0, 1] to the crash noise schedule used for sampling."""
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2 ) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
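
# A quick numeric check of the schedule (hedged sketch): at t = 0.5, sigma =
# sin(pi/4)**2 = 0.5 and alpha = sqrt(0.75), so the remapped value is
# atan2(0.5, sqrt(0.75)) / pi * 2 = 1/3.
print(get_crash_schedule(torch.tensor([0.0, 0.5, 1.0] ) ) )  # tensor([0.0000, 0.3333, 1.0000])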
class Object(object ):
    pass


class DiffusionUncond(nn.Module ):
    def __init__(self , global_args ):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download(model_name ):
    url = MODELS_MAP[model_name]["url"]
    os.system(f'wget {url} ./' )
    return f'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name ):
    if name.startswith("skip" ):
        return name.replace("skip" , RES_CONV_MAP["skip"] )

    # name has to be of format main.{digit}
    if not name.startswith("main." ):
        raise ValueError(f'ResConvBlock error with {name}' )

    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(f'Attn error with {name}' )
def rename(input_string , max_depth=13 ):
    string = input_string
    if string.split("." )[0] == "timestep_embed":
        return string.replace("timestep_embed" , "time_proj" )

    depth = 0
    if string.startswith("net.3." ):
        depth += 1
        string = string[6:]
    elif string.startswith("net." ):
        string = string[4:]

    while string.startswith("main.7." ):
        depth += 1
        string = string[7:]

    if string.startswith("main." ):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num ) > 3 else "down_blocks.0"

    if not string_left.startswith("." ):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left

    if not isinstance(string_left , list ):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict ):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel" ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict , name , v ):
    if len(name ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices
        tripled_shape = v.shape[0]
        single_shape = tripled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args ):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )

    model_name = args.model_path.split("/" )[-1].split("." )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name )

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )["state_dict"] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )

    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )

    assert len(renamed_minus_diffusers ) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith("kernel" ) for k in list(diffusers_minus_renamed ) ), f'Problem with {diffusers_minus_renamed}'

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict )

    steps = 1_00
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )

    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )

    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=diffusers_scheduler )

    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios

    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )

    diff_sum = (generated - audio ).abs().sum()
    diff_max = (generated - audio ).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )

    print("Diff sum" , diff_sum )
    print("Diff max" , diff_max )
    assert diff_max < 1e-3, f'Diff max: {diff_max} is too much :-/'

    print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
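
# Invocation sketch (hedged; the script file name is assumed): `--model_path` must be
# one of the MODELS_MAP keys above or a local .ckpt path, and the output directory is
# illustrative only.
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers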
def is_palindrome(n: int ) -> bool:
    return str(n ) == str(n )[::-1]


def sum_reverse(n: int ) -> int:
    return int(n ) + int(str(n )[::-1] )


def solution(limit: int = 1_00_00 ) -> int:
    lychrel_nums = []
    for num in range(1 , limit ):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a )
            iterations += 1
            if is_palindrome(a ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
print(f'''{solution() = }''')
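
# A quick check of the helpers above (hedged sketch): 47 reaches a palindrome in one
# reverse-and-add step, while 196 — the classic Lychrel candidate — does not.
print(is_palindrome(sum_reverse(47 ) ) )  # True: 47 + 74 = 121
print(is_palindrome(sum_reverse(196 ) ) )  # False: 196 + 691 = 887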
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float , tangential_force: float , area: float , ) -> tuple[str, float]:
    """Solve tau = F / A for whichever of the three quantities is given as zero."""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
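
# Usage sketch for the solver above (hedged): exactly one argument must be zero and
# the function returns the name and value of that unknown in tau = F / A.
print(shear_stress(stress=0 , tangential_force=100 , area=20 ) )  # ('stress', 5.0)
print(shear_stress(stress=25 , tangential_force=0 , area=20 ) )  # ('tangential_force', 500)
print(shear_stress(stress=25 , tangential_force=100 , area=0 ) )  # ('area', 4.0)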
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self , **config ):
        scheduler_config = {
            "num_train_timesteps": 2_0_1,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        scheduler_config.update(**config )
        return scheduler_config
    def test_step_shape(self ):
        num_inference_steps = 1_0
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample

        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps(self ):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_clip_denoised(self ):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )
    def test_full_loop_no_noise_onestep(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )

            # 2. predict noise residual
            residual = model(scaled_sample , t )

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 192.7_614 ) < 1E-2
        assert abs(result_mean.item() - 0.2_510 ) < 1E-3
    def test_full_loop_no_noise_multistep(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [1_0_6, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )

            # 2. predict noise residual
            residual = model(scaled_sample , t )

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 347.6_357 ) < 1E-2
        assert abs(result_mean.item() - 0.4_527 ) < 1E-3
    def test_custom_timesteps_increasing_order(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [3_9, 3_0, 1_2, 1_5, 0]

        with self.assertRaises(ValueError , msg="""`timesteps` must be in descending order."""):
            scheduler.set_timesteps(timesteps=timesteps )

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [3_9, 3_0, 1_2, 1, 0]
        num_inference_steps = len(timesteps )

        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def test_custom_timesteps_too_large(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
            scheduler.set_timesteps(timesteps=timesteps )
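
# A hedged usage sketch of the scheduler exercised above, with the model stubbed by
# zeros so the snippet is self-contained; a real consistency model would replace the
# stand-in marked below.
_demo_cm = CMStochasticIterativeScheduler(num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 )
_demo_cm.set_timesteps(2 )
_demo_cm_sample = torch.randn(1 , 3 , 8 , 8 ) * _demo_cm.init_noise_sigma
_demo_cm_generator = torch.manual_seed(0 )
for _demo_cm_t in _demo_cm.timesteps:
    _demo_cm_input = _demo_cm.scale_model_input(_demo_cm_sample , _demo_cm_t )
    _demo_cm_out = torch.zeros_like(_demo_cm_input )  # stand-in for model(input, t)
    _demo_cm_sample = _demo_cm.step(_demo_cm_out , _demo_cm_t , _demo_cm_sample , generator=_demo_cm_generator ).prev_sample
print(_demo_cm_sample.shape )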
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value
_run_slow_tests = parse_flag_from_env("""RUN_SLOW""" , default=False)
_run_remote_tests = parse_flag_from_env("""RUN_REMOTE""" , default=False)
_run_local_tests = parse_flag_from_env("""RUN_LOCAL""" , default=True)
_run_packaged_tests = parse_flag_from_env("""RUN_PACKAGED""" , default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
require_pyazr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
    reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
    reason="""test requires apache-beam and a compatible dill version""",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("""0.3.2"""),
    reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == """win32""",
    reason="""test should not be run on Windows""",
)
def require_faiss(test_case ):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss" )(test_case )
    return test_case


def require_regex(test_case ):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex" )(test_case )
    return test_case


def require_elasticsearch(test_case ):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch" )(test_case )
    return test_case


def require_sqlalchemy(test_case ):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy" )(test_case )
    return test_case
def require_torch(test_case ):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch" )(test_case )
    return test_case


def require_tf(test_case ):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow" )(test_case )
    return test_case


def require_jax(test_case ):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX" )(test_case )
    return test_case


def require_pil(test_case ):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow" )(test_case )
    return test_case
def require_transformers(test_case ):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers" )(test_case )
    else:
        return test_case


def require_tiktoken(test_case ):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken" )(test_case )
    else:
        return test_case


def require_spacy(test_case ):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy" )(test_case )
    else:
        return test_case


def require_spacy_model(model ):
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401

            spacy.load(model )
        except ImportError:
            return unittest.skip("test requires spacy" )(test_case )
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model ) )(test_case )
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case ):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark" )(test_case )
    else:
        return test_case


def require_joblibspark(test_case ):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark" )(test_case )
    else:
        return test_case
def slow(test_case ):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow" )(test_case )
    return test_case


def local(test_case ):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local" )(test_case )
    return test_case


def packaged(test_case ):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged" )(test_case )
    return test_case


def remote(test_case ):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote" )(test_case )
    return test_case
def for_all_test_methods(*decorators ):
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith("test" ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception ):
    pass


class OfflineSimulationMode(Enum ):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1e-16 ):
    online_request = requests.Session().request

    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs["timeout"] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , f"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def set_current_working_directory_to_temp_dir(*args , **kwargs ):
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1 , rng2 ):
    return deepcopy(rng1 ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 1_00 , 10 ).tolist()
def xfail_if_500_502_http_error(func ):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err

    return decorator.decorator(_wrapper , func )
class _RunOutput:
    def __init__(self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break


async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print("\nRunning: " , " ".join(cmd ) )

    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label="stdout:" ) ),
            _read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label="stderr:" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=1_80 , quiet=False , echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )

    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""")

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
    worker = re.sub(r"^gw" , "" , worker , 0 , re.M )
    return int(worker )


def get_torch_dist_unique_port():
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
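
# A hedged sketch of how the helpers above compose in a test module; the test body
# is illustrative and the decorator/context-manager names follow the definitions
# fixed above.
@require_torch
@slow
def test_offline_guard():
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        # any requests.Session().request(...) call in here raises ConnectionError
        pass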
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
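
# A minimal interactive sketch (my addition) of the inspection APIs exercised above:
#
#     from datasets import get_dataset_config_names, get_dataset_split_names
#
#     configs = get_dataset_config_names("squad")            # ['plain_text']
#     splits = get_dataset_split_names("squad", configs[0])  # ['train', 'validation']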
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
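        # Worked example (my addition) with the defaults above: image_size=30, patch_size=2
        # -> (30 // 2) ** 2 = 225 patches, plus one [CLS] token -> seq_length = 226.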
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
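
# A minimal standalone inference sketch (my addition), mirroring the integration test above:
#
#     processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])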
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
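
# A minimal usage sketch (my addition; assumes the checkpoints above are reachable and
# `datasets` is installed so default speaker embeddings can be fetched):
#
#     tool = TextToSpeechTool()
#     tool.setup()
#     speech = tool("Hello, world!")  # PipelineTool.__call__ chains encode -> forward -> decode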
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
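
# Worked example (my addition): with pad_token_id=1, an input row [5, 7, 1, 1] yields
# attention_mask [1, 1, 0, 0] via tf.cast(tf.math.not_equal(input_ids, 1), tf.int8).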
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
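
# Note (my addition): only every third Fibonacci number is even, so the same sum can be
# computed without the parity filter via the even-term recurrence E(k) = 4 * E(k-1) + E(k-2)
# over 2, 8, 34, 144, ... A minimal sketch:
#
#     def solution_even_only(n: int = 4_000_000) -> int:
#         total, a, b = 0, 2, 8
#         while a <= n:
#             total, a, b = total + a, b, 4 * b + a
#         return total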
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
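
# Note (my addition): with this pattern `import transformers.models.perceiver` stays cheap;
# _LazyModule only imports heavy submodules such as `modeling_perceiver` the first time one
# of the names listed in `_import_structure` is actually accessed.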
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb a staircase of `number_of_steps` steps,
    taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
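
# Note (my addition): climb_stairs follows the Fibonacci recurrence f(n) = f(n-1) + f(n-2),
# since the final move is either a single step or a double step; e.g.
# climb_stairs(4) == climb_stairs(3) + climb_stairs(2) == 3 + 2 == 5.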
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
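
# Example (my addition) against the sample graphs above: the shortest E -> F route is
# E -> G -> F with total weight 2 + 1 = 3.
#
#     >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
#     3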
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__snake_case : List[Any] = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ) -> None:
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        downloaded_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(downloaded_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}

        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
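        # Worked example (my addition): with the four fairseq specials above, fairseq_offset == 4,
        # so the sentencepiece piece with id 10 maps to token id 4 + 10 = 14, while any piece the
        # sentencepiece model doesn't know (PieceToId(...) == 0) maps to the fairseq <unk> id 3.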
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def __getstate__( self : Union[str, Any]):
"""simple docstring"""
a : str = self.__dict__.copy()
a : List[Any] = None
return state
def __setstate__( self : List[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
a : Tuple = {}
a : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , 'wb') as fi:
a : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
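# A minimal usage sketch for the tokenizer above, assuming the `transformers`
# package and the public "camembert-base" checkpoint (network required);
# illustrative only, not part of the original module.
from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
encoded = tokenizer("J'aime le camembert !")
# ids are offset by len(fairseq_tokens_to_ids) == 4 relative to the raw
# sentencepiece vocabulary, as implemented in the id-conversion methods above
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))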
| 610
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = 'perceiver'
def __init__( self : List[Any] , a_ : List[Any]=256 , a_ : List[Any]=1280 , a_ : str=768 , a_ : List[Any]=1 , a_ : Tuple=26 , a_ : Optional[Any]=8 , a_ : str=8 , a_ : int=None , a_ : Dict=None , a_ : Dict="kv" , a_ : List[Any]=1 , a_ : Dict=1 , a_ : Dict="gelu" , a_ : Dict=0.1 , a_ : Optional[Any]=0.02 , a_ : Dict=1e-1_2 , a_ : Union[str, Any]=True , a_ : Optional[int]=262 , a_ : str=2048 , a_ : List[str]=56 , a_ : Dict=[368, 496] , a_ : int=16 , a_ : str=1920 , a_ : List[str]=16 , a_ : List[str]=[1, 16, 224, 224] , **a_ : Union[str, Any] , )-> Tuple:
"""simple docstring"""
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = num_latents
SCREAMING_SNAKE_CASE__ : Dict = d_latents
SCREAMING_SNAKE_CASE__ : Optional[Any] = d_model
SCREAMING_SNAKE_CASE__ : str = num_blocks
SCREAMING_SNAKE_CASE__ : Tuple = num_self_attends_per_block
SCREAMING_SNAKE_CASE__ : List[str] = num_self_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = num_cross_attention_heads
SCREAMING_SNAKE_CASE__ : Any = qk_channels
SCREAMING_SNAKE_CASE__ : Tuple = v_channels
SCREAMING_SNAKE_CASE__ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE__ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE__ : Dict = cross_attention_widening_factor
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE__ : List[Any] = image_size
# flow attributes
SCREAMING_SNAKE_CASE__ : Dict = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE__ : Tuple = num_frames
SCREAMING_SNAKE_CASE__ : int = audio_samples_per_frame
SCREAMING_SNAKE_CASE__ : Union[str, Any] = samples_per_patch
SCREAMING_SNAKE_CASE__ : Optional[int] = output_shape
class snake_case ( UpperCamelCase_ ):
@property
def __lowercase( self : str )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE__ : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def __lowercase( self : Tuple )-> float:
"""simple docstring"""
return 1e-4
def __lowercase( self : List[str] , a_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a_ : int = -1 , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , a_ : int = 3 , a_ : int = 40 , a_ : int = 40 , )-> Mapping[str, Any]:
"""simple docstring"""
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(a_ , a_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : List[Any] = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : Any = preprocessor.num_special_tokens_to_add(a_ )
SCREAMING_SNAKE_CASE__ : str = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [' '.join(['a'] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ : Tuple = dict(preprocessor(a_ , return_tensors=a_ ) )
SCREAMING_SNAKE_CASE__ : Dict = inputs.pop('input_ids' )
return inputs
elif isinstance(a_ , a_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : Dict = compute_effective_axis_dimension(a_ , fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._generate_dummy_images(a_ , a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : Tuple = dict(preprocessor(images=a_ , return_tensors=a_ ) )
SCREAMING_SNAKE_CASE__ : str = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
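# A hedged sketch of the dynamic-axis trick used above: a -1 (dynamic) batch or
# sequence dimension is swapped for a small fixed value at export time so ONNX
# tracing does not over-specialize. `effective_dim` is an illustrative helper
# that mirrors how compute_effective_axis_dimension is used in this file.
def effective_dim(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # a non-positive value means "dynamic": substitute the fixed export-time
    # dimension, leaving room for special tokens the tokenizer will add
    return fixed_dimension - num_token_to_add if dimension <= 0 else dimension

assert effective_dim(-1, fixed_dimension=2) == 2                      # dynamic batch -> 2 samples
assert effective_dim(-1, fixed_dimension=8, num_token_to_add=2) == 6  # dynamic sequence -> 8 incl. specials
assert effective_dim(16, fixed_dimension=2) == 16                     # static dims pass through unchanged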
| 85
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = "Hello world! cécé herlolip"
SCREAMING_SNAKE_CASE__ : Dict = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights to our model."""
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
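# Why the script saves a state_dict rather than the whole pickled model (see the
# comment in the conversion function above): torch.save(model) pickles the class
# and its import path, while a state_dict is just named tensors. A minimal
# self-contained sketch:
import torch
import torch.nn as nn

layer = nn.Linear(4, 2)
torch.save(layer.state_dict(), "weights.bin")  # portable: no class/pickle coupling
restored = nn.Linear(4, 2)
restored.load_state_dict(torch.load("weights.bin"))
assert torch.equal(layer.weight, restored.weight)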
| 85
| 1
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
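# The step() branches above are a linear multistep (Adams-Bashforth) update: with
# up to four stored estimates `ets`, the effective derivative is a fixed linear
# combination of them. A standalone sketch of the same coefficients on dy/dt = -y:
import torch

def ab_combine(ets: list) -> torch.Tensor:
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

y, h, ets = torch.tensor(1.0), 0.01, []
for _ in range(100):
    ets.append(-y)  # derivative estimate for dy/dt = -y
    y = y + h * ab_combine(ets[-4:])
# after 100 steps of size 0.01, y should be close to exp(-1)
assert torch.isclose(y, torch.tensor(1.0).exp().reciprocal(), atol=1e-3)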
| 107
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 107
| 1
|
import math
import sys
import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
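# Illustrative self-check for get_gauss_kernel above (not part of the original
# script): the kernel is built from distances to the centre pixel, so the centre
# carries the largest weight.
def _demo_gauss_kernel() -> None:
    kernel = get_gauss_kernel(5, 1.0)
    assert kernel.shape == (5, 5)
    assert kernel.max() == kernel[2, 2]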
| 63
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
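# A standalone sketch of the flush predicate these tests exercise (illustrative;
# the real logic lives in sol1.PokerHand._is_flush): a hand is a flush when all
# five cards share one suit character.
def is_flush(hand: str) -> bool:
    suits = [card[1] for card in hand.split()]
    return len(set(suits)) == 1

assert is_flush("2H 3H 4H 5H 6H")
assert not is_flush("AS AH 2H AD AC")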
| 54
| 0
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : List[Any] =["""image_processor""", """tokenizer"""]
__a : Dict ="""AutoImageProcessor"""
__a : Tuple ="""AutoTokenizer"""
def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ ):
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase_ , )
lowerCAmelCase = kwargs.pop('''feature_extractor''' )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = self.image_processor
lowerCAmelCase = False
def __call__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase = kwargs.pop('''images''' , UpperCAmelCase_ )
lowerCAmelCase = kwargs.pop('''text''' , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCAmelCase = self.image_processor(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None:
lowerCAmelCase = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase = encodings['''input_ids''']
return inputs
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@contextmanager
def __snake_case ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer
yield
lowerCAmelCase = self.image_processor
lowerCAmelCase = False
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_=None ):
if added_vocab is None:
lowerCAmelCase = self.tokenizer.get_added_vocab()
lowerCAmelCase = {}
while tokens:
lowerCAmelCase = re.search(r'''<s_(.*?)>''' , UpperCAmelCase_ , re.IGNORECASE )
if start_token is None:
break
lowerCAmelCase = start_token.group(1 )
lowerCAmelCase = re.search(rF"""</s_{key}>""" , UpperCAmelCase_ , re.IGNORECASE )
lowerCAmelCase = start_token.group()
if end_token is None:
lowerCAmelCase = tokens.replace(UpperCAmelCase_ , '''''' )
else:
lowerCAmelCase = end_token.group()
lowerCAmelCase = re.escape(UpperCAmelCase_ )
lowerCAmelCase = re.escape(UpperCAmelCase_ )
lowerCAmelCase = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , UpperCAmelCase_ , re.IGNORECASE )
if content is not None:
lowerCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCAmelCase = self.tokenajson(UpperCAmelCase_ , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ )
if value:
if len(UpperCAmelCase_ ) == 1:
lowerCAmelCase = value[0]
lowerCAmelCase = value
else: # leaf nodes
lowerCAmelCase = []
for leaf in content.split(r'''<sep/>''' ):
lowerCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCAmelCase_ )
if len(output[key] ) == 1:
lowerCAmelCase = output[key][0]
lowerCAmelCase = tokens[tokens.find(UpperCAmelCase_ ) + len(UpperCAmelCase_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ )
if len(UpperCAmelCase_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __snake_case ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase_ , )
return self.image_processor_class
@property
def __snake_case ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase_ , )
return self.image_processor
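# A hedged, simplified sketch of the tag-to-JSON parsing idea implemented above
# (hypothetical helper, not the library method): nested <s_key>...</s_key> spans
# become nested dict entries.
import re

def tiny_token2json(tokens: str) -> dict:
    out = {}
    for key, inner in re.findall(r"<s_(.*?)>(.*?)</s_\1>", tokens):
        out[key] = tiny_token2json(inner) if "<s_" in inner else inner
    return out

assert tiny_token2json("<s_menu><s_name>latte</s_name></s_menu>") == {"menu": {"name": "latte"}}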
| 714
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Any =BertJapaneseTokenizer
__a : Optional[int] =False
__a : int =True
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowerCAmelCase = tokenizer.subword_tokenizer
lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Union[str, Any] =BertJapaneseTokenizer
__a : Optional[int] =False
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , **UpperCAmelCase_ ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowerCAmelCase = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
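# A minimal sketch of the greedy longest-match-first idea behind WordpieceTokenizer,
# exercised by the tests above (illustrative re-implementation, not the library code):
def greedy_wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # the whole word falls back to [UNK], as in the tests
        pieces.append(cur)
        start = end
    return pieces

assert greedy_wordpiece("こんばんは", {"こん", "##ばんは"}) == ["こん", "##ばんは"]
assert greedy_wordpiece("こんにちほ", {"こん"}) == ["[UNK]"]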
| 33
| 0
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class A :
def __init__( self : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int=13 , __magic_name__ : List[Any]=7 , __magic_name__ : Any=True , __magic_name__ : Optional[int]=True , __magic_name__ : Tuple=False , __magic_name__ : List[Any]=True , __magic_name__ : List[Any]=99 , __magic_name__ : List[Any]=32 , __magic_name__ : str=5 , __magic_name__ : int=4 , __magic_name__ : Dict=37 , __magic_name__ : Tuple="gelu" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[str]=512 , __magic_name__ : Dict=16 , __magic_name__ : Any=2 , __magic_name__ : Any=0.02 , __magic_name__ : Tuple=3 , __magic_name__ : List[str]=4 , __magic_name__ : Union[str, Any]=None , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , use_stable_embedding=__magic_name__ , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = OpenLlamaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCAmelCase__ = model(__magic_name__ , attention_mask=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 48
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
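The slow test above converts PyTorch weights on the fly via `from_pt=True`. A minimal standalone sketch of that loading path (assumes both the PyTorch and Flax backends are installed; the checkpoint name matches the test):

import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)  # converts PT weights to Flax params
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768) for roberta-base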
| 52
| 0
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
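The four full-loop tests above all exercise the same consumption pattern for `EulerDiscreteScheduler`. A minimal sketch of that loop with a stand-in for the model (the zero prediction and the 1x3x8x8 sample shape are illustrative assumptions, not part of the test file):

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma  # scale the initial noise
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a real UNet call: model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample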
| 563
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
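Usage sketch for the processor above (the checkpoint name is an assumption; any Wav2Vec2 CTC checkpoint works). Audio is routed to the feature extractor and `text=` to the tokenizer, which becomes `labels` for CTC training:

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16000, text="HELLO WORLD")
print(sorted(inputs.keys()))  # input_values plus labels (token ids of the transcription)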
| 563
| 1
|
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Returns (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Returns the unique n modulo n1*n2 with n % n1 == r1 and n % n2 == r2 (n1, n2 coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Returns b such that (a * b) % n == 1."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
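Worked example for the helpers above, computed by hand: find n with n = 1 (mod 5) and n = 3 (mod 7). Both solvers agree because gcd(5, 7) == 1.

assert extended_euclid(10, 6) == (-1, 2)  # 10*(-1) + 6*2 == gcd(10, 6) == 2
assert invert_modulo(2, 7) == 4  # 2*4 == 8 == 1 (mod 7)
assert chinese_remainder_theorem(5, 1, 7, 3) == 31  # 31 % 5 == 1 and 31 % 7 == 3
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31  # the invert_modulo route gives 66 % 35 == 31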
| 573
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of lst (1-indexed); assumes distinct elements."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
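Usage sketch for `kth_number` (1-indexed; the partition drops elements equal to the pivot, hence the distinct-elements assumption noted above):

assert kth_number([2, 1, 3, 4, 5], k=3) == 3  # median of the five elements
assert kth_number([9, 7, 5, 3, 1], k=1) == 1  # minimum
assert kth_number([9, 7, 5, 3, 1], k=5) == 9  # maximum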
| 573
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
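With the `_LazyModule` indirection above, a submodule such as `modeling_longt5` is only imported when one of its names is first accessed. A minimal usage sketch (assumes `transformers` is installed):

from transformers import LongT5Config  # resolved through the lazy module; pulls in configuration_longt5 only

config = LongT5Config()
print(config.model_type)  # "longt5"; modeling_longt5 has still not been imported at this point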
| 700
|
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
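Worked example of WER = (S + D + I) / (S + D + C), reproducing the 0.5 from the docstring above. The per-pair counts are what jiwer's dynamic string alignment yields:

from jiwer import compute_measures

# "this is the prediction"   vs "this is the reference":  S=1, D=0, I=0, hits=3
# "there is an other sample" vs "there is another one":   S=2, D=0, I=1, hits=2
measures = compute_measures("this is the reference", "this is the prediction")
print(measures["substitutions"], measures["deletions"], measures["insertions"], measures["hits"])
# incorrect = (1+0+0) + (2+0+1) = 4 and total = (1+0+3) + (2+0+2) = 8, so WER = 4 / 8 = 0.5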
| 686
| 0
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
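The heart of the merge above, in isolation: each LoRA pair stores a low-rank update up @ down that is scaled by alpha and added onto the frozen base weight. Shapes below are illustrative assumptions:

import torch

out_features, in_features, rank = 8, 8, 2
base_weight = torch.zeros(out_features, in_features)  # stands in for curr_layer.weight.data
lora_up = torch.randn(out_features, rank)
lora_down = torch.randn(rank, in_features)
alpha = 0.75

base_weight += alpha * torch.mm(lora_up, lora_down)  # the same update the script applies per layer
print(base_weight.shape)  # torch.Size([8, 8])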
| 352
|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
_lowerCamelCase : List[Any] = input("""Enter Video/IGTV url: """).strip()
_lowerCamelCase : int = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 352
| 1
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 707
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 301
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11], readout_type="project", reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1,
        use_batch_norm_in_fusion_residual=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
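Usage sketch: the hybrid branch above auto-builds a default BiT backbone config when none is supplied, while the plain ViT branch leaves the backbone fields unset.

from transformers import DPTConfig

plain = DPTConfig()                 # ViT-style DPT; plain.backbone_config is None
hybrid = DPTConfig(is_hybrid=True)  # fills in the default BiT backbone
print(hybrid.backbone_config.layer_type)  # "bottleneck", from the default dict above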
| 404
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement, hint=None):
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
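Usage sketch for the checkers above; each call raises (ImportError, or PackageNotFoundError with the hint appended) when the running environment does not satisfy the spec:

require_version("python>=3.7")  # the interpreter itself is special-cased
require_version("packaging>=20.0", hint="pip install -U packaging")
require_version_core("torch>=1.9,<3.0")  # comma-separated specs are checked one by one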
| 404
| 1
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
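Example invocation (all three paths are placeholders for your own checkpoint, config, and output location):

# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert/model.ckpt \
#     --rembert_config_file ./rembert/config.json \
#     --pytorch_dump_path ./rembert/pytorch_model.bin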
| 602
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """Returns the module names (typically the output head) that should stay in full precision."""
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
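# Usage sketch (hypothetical `model` and config, not part of the original file):
#   quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#   modules_to_not_convert = get_keys_to_not_convert(model)  # e.g. ["lm_head"] for most causal LMs
#   model = replace_with_bnb_linear(model, modules_to_not_convert, quantization_config=quantization_config)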
| 602
| 1
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain top-down recursion (no memoization) for the side length of the largest all-ones square."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Top-down recursion with memoization: each sub-problem is cached in dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
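# With memoization each of the rows * cols sub-problems is solved at most once,
# so the dp_array variant runs in O(rows * cols) time, versus the exponential
# time of the plain recursion above (three recursive calls per cell).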
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and the next row of the table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias: next_row must keep the previous row's values intact
        next_row = current_row[:]
    return largest_square_area
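# Quick check for the space-optimized variant (keeps only two rows of DP state):
# >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
# 2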
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 236
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """Returns a mapping from utf-8 byte values to printable unicode characters, as used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
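# Example mappings: printable bytes map to themselves, the rest are shifted past 255.
#   bytes_to_unicode()[ord("A")] == "A"
#   bytes_to_unicode()[32] == "Ġ"  # the space byte, as seen in GPT-2-style merges files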
def get_pairs(word: tuple) -> set:
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
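# Example: get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}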
class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> list:
        """Tokenize a string into byte-level BPE tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: list) -> str:
        """Converts a sequence of tokens back into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text: str, is_split_into_words: bool = False, **kwargs) -> tuple:
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        return token_ids_0 + [self.eos_token_id]
def _lowerCamelCase ( self : Union[str, Any] ,UpperCamelCase : "Conversation" ) -> List[int]:
_lowercase : Any = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase )
_lowercase : Dict = ' '.join(UpperCamelCase )
_lowercase : Optional[Any] = self.encode(UpperCamelCase )
if len(UpperCamelCase ) > self.model_max_length:
_lowercase : str = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
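# Usage sketch (checkpoint name comes from the pretrained maps above; the exact
# tokens depend on the downloaded vocab, so the output shown is illustrative):
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   tokenizer.tokenize(" Hello world")  # byte-level BPE tokens, e.g. ["ĠHello", "Ġworld"]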
| 125
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """Constructs an XLNet tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs: str) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces, splitting digit/comma artifacts."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
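# Usage sketch (checkpoint name comes from the pretrained map above; pieces are
# illustrative since they depend on the downloaded SentencePiece model):
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   tokenizer.tokenize("Hello world")  # e.g. ["▁Hello", "▁world"]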
| 133
|
"""simple docstring"""
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return a decimal number in its simplest fraction form, as (numerator, denominator)."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclidean algorithm: after the loop, `divisor` holds gcd(numerator, denominator)
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
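# Worked example: 1.5 has one fractional digit, so numerator = 15 and
# denominator = 10; gcd(15, 10) = 5, giving the reduced fraction (3, 2).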
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
| 133
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when none was passed in, so that
        # callers (e.g. test_switch) can exercise a specific scheduler instance
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 694
|
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
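# Usage sketch (shapes and feature sizes are illustrative, not from the original file):
#   dist_output = StudentTOutput(dim=1)
#   projection = dist_output.get_parameter_projection(in_features=32)
#   distr_args = projection(torch.randn(8, 32))  # tuple of (df, loc, scale) tensors
#   distribution = dist_output.distribution(distr_args)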
| 694
| 1
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
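# Sketch of the effect (hypothetical tensors): if the last two columns of
# `input_ids` contain only `pad_token_id`, trim_batch drops those two columns
# from both `input_ids` and `attention_mask`.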
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
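# Example: normalize_answer("The  Cat!") == "cat" (lower-cases, strips
# punctuation and articles, and collapses whitespace).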
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
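# Example: f1_score("the cat sat", "cat sat") == 1.0, since normalization
# removes the article "the" and the remaining token bags match exactly.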
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 686
|
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 686
| 1
|
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotates the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pads the input message so its length is a multiple of 64 bytes (512 bits)."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Returns a list of bytestrings, each of length 64."""
        return [self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)]

    def expand_block(self, block):
        """Unpacks a 64-byte block into 16 integers and expands them to 80 via rotations."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Pads the data, splits it into blocks and runs the 80-round compression per block."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
| 294
|
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class snake_case ( lowercase_ ):
"""simple docstring"""
_a = field(
default="""""", metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""}, )
def a__ ( self ) -> Union[str, Any]:
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.', _lowercase, )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 294
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
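# With the registration above, importing this module is cheap: the heavy
# torch/TF/Flax submodules listed in _import_structure are only loaded on first
# attribute access, e.g. `from transformers.models.bert import BertModel`.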
| 98
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
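# e.g. random_subsample(np.zeros(32_000), max_length=1.0, sample_rate=16_000)
# returns a random contiguous 1-second (16_000-sample) window of the waveform.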
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 98
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
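# Usage sketch (hedged): `datasets` consumes this template through
# Dataset.prepare_for_task("question-answering-extractive"), which uses the
# column_mapping above to rename a dataset's columns to the canonical
# question/context/answers schema.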
| 102
|
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time, naive recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revue
def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic-programming) solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Helper for top_down_cut_rod; memoizes intermediate results in max_rev."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue

    return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    """Iterative bottom-up dynamic-programming solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]
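# Worked example: with prices = [1, 5, 8, 9], bottom_up_cut_rod(4, prices)
# fills max_rev = [0, 1, 5, 8, 10], so the best revenue for a length-4 rod is
# 10, obtained by cutting it into two pieces of length 2 (5 + 5).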
def _enforce_args(n: int, prices: list):
    """Basic checks on the arguments to the rod-cutting algorithms."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 537
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output class for Semantic Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 460
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 471
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]

    return data
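# Note: the loop above swaps two uniformly random positions per pass, which is
# simple but does not yield a uniform distribution over permutations. A minimal
# sketch of the textbook Fisher-Yates (Durstenfeld) variant, added for comparison:
def durstenfeld_shuffle(data: list) -> list:
    # Walk backwards, swapping each element with a random one from the
    # untouched prefix [0, i]; every permutation is equally likely.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data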
if __name__ == "__main__":
__a : Tuple = [0, 1, 2, 3, 4, 5, 6, 7]
__a : str = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 397
| 0
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for the GPT-SW3 models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8_203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        """Returns the preprocessed text: non-printing characters removed, whitespaces normalized, NFC applied."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to remove the default clean-up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) into a single string. Special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts to token ids using preprocessing and the raw SentencePiece tokenizer."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids back to text using the raw SentencePiece tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )

        return self.encode(text=prompt)
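# Minimal usage sketch (the local SentencePiece model path is hypothetical):
# tok = GPTSw3Tokenizer(vocab_file="spiece.model")
# ids = tok.encode_fast("Träd är fina", return_tensors="pt")  # torch.Tensor of ids
# text = tok.decode_fast(ids.tolist())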
| 704
|
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Removes unused keys (e.g. seg_head.aux_head) from the state dict."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 28
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def _lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def _lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = self.tokenizer.model_input_names
a_ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase_ , )
return self.image_processor_class
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCAmelCase_ , )
return self.image_processor
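# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of how a processor like this is typically used.
# The checkpoint name "facebook/flava-full" and the file "photo.jpg" are
# assumptions; any FLAVA checkpoint with a tokenizer and image processor works.
#
# from transformers import FlavaProcessor
# from PIL import Image
#
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# image = Image.open("photo.jpg")
# inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt", padding=True)
# print(inputs.keys())  # input_ids, attention_mask, pixel_values, ...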
| 577
|
"""ALBERT model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    """Configuration class to store the configuration of an ALBERT model."""

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
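# --- Usage sketch (not part of the original file) ---
# A hedged example of what this module provides: build a config, override a
# hyperparameter, and inspect the dynamic ONNX input axes. The values shown
# are the defaults above, not recommendations.
#
# config = AlbertConfig(num_hidden_layers=6)
# print(config.hidden_size)      # 4096 (default)
# onnx_config = AlbertOnnxConfig(config)
# print(onnx_config.inputs)      # OrderedDict of dynamic axes per model input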
| 517
| 0
|
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = int(lowerCamelCase__ )
assert noofclusters < len(lowerCamelCase__ )
# Find out the dimensionality
lowerCAmelCase__ = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowerCAmelCase__ = list(range(len(lowerCamelCase__ ) ) )
shuffle(lowerCamelCase__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowerCAmelCase__ = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowerCAmelCase__ = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowerCAmelCase__ = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowerCamelCase__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowerCAmelCase__ = tf.placeholder("""float64""" , [dim] )
lowerCAmelCase__ = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowerCAmelCase__ = [tf.Variable(0 ) for i in range(len(lowerCamelCase__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowerCAmelCase__ = tf.placeholder("""int32""" )
lowerCAmelCase__ = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowerCAmelCase__ = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowerCAmelCase__ = tf.reduce_mean(lowerCamelCase__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowerCAmelCase__ = tf.placeholder("""float""" , [dim] )
lowerCAmelCase__ = tf.placeholder("""float""" , [dim] )
lowerCAmelCase__ = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowerCamelCase__ , lowerCamelCase__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowerCAmelCase__ = tf.placeholder("""float""" , [noofclusters] )
lowerCAmelCase__ = tf.argmin(lowerCamelCase__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowerCAmelCase__ = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowerCamelCase__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowerCAmelCase__ = 100
for _ in range(lowerCamelCase__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowerCamelCase__ ) ):
lowerCAmelCase__ = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowerCAmelCase__ = [
sess.run(lowerCamelCase__ , feed_dict={va: vect, va: sess.run(lowerCamelCase__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowerCAmelCase__ = sess.run(
lowerCamelCase__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowerCamelCase__ ):
# Collect all the vectors assigned to this cluster
lowerCAmelCase__ = [
vectors[i]
for i in range(len(lowerCamelCase__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowerCAmelCase__ = sess.run(
lowerCamelCase__ , feed_dict={mean_input: array(lowerCamelCase__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowerCAmelCase__ = sess.run(lowerCamelCase__ )
lowerCAmelCase__ = sess.run(lowerCamelCase__ )
return centroids, assignments
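# --- Usage sketch (not part of the original file) ---
# A hedged toy run on random 2-D points; requires TensorFlow 1.x (graph mode),
# which this script is written against.
#
# import numpy as np
# points = np.random.rand(50, 2).astype("float64")
# centroids, assignments = TFKMeansCluster(points, 3)
# print(centroids)     # 3 centroid coordinates
# print(assignments)   # cluster index for each of the 50 points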
| 715
|
"""simple docstring"""
import os
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = os.path.dirname(os.path.realpath(lowerCamelCase__ ) )
lowerCAmelCase__ = os.path.join(lowerCamelCase__ , """triangle.txt""" )
with open(lowerCamelCase__ ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = []
for line in triangle:
lowerCAmelCase__ = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(lowerCamelCase__ ) )
a.append(lowerCamelCase__ )
for i in range(1 , len(lowerCamelCase__ ) ):
for j in range(len(a[i] ) ):
lowerCAmelCase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0
lowerCAmelCase__ = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowerCamelCase__ , lowerCamelCase__ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
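# --- Worked example (not part of the original file) ---
# The same recurrence on a tiny in-memory triangle, to make the DP step concrete:
#
#       3
#      7 4          row 1 becomes [10, 7]        (7+3, 4+3)
#     2 4 6         row 2 becomes [12, 14, 13]   -> answer 14 (3 -> 7 -> 4)
#
# def max_path(triangle):
#     a = [row[:] for row in triangle]
#     for i in range(1, len(a)):
#         for j in range(len(a[i])):
#             up = a[i - 1][j] if j != len(a[i - 1]) else 0
#             up_left = a[i - 1][j - 1] if j > 0 else 0
#             a[i][j] += max(up, up_left)
#     return max(a[-1])
#
# assert max_path([[3], [7, 4], [2, 4, 6]]) == 14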
| 674
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
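# --- Usage sketch (not part of the original file) ---
# A hedged example of calling the tool directly; PipelineTool instances are
# callable and load the checkpoint lazily on first use (an assumption about
# the surrounding tools framework).
#
# summarizer = TextSummarizationTool()
# summary = summarizer("Long meeting transcript goes here ...")
# print(summary)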
| 295
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
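# --- Usage sketch (not part of the original file) ---
# A hedged example of using the processor under test outside the harness; the
# checkpoint name "naver-clova-ix/donut-base" and file "receipt.png" are
# assumptions.
#
# from transformers import DonutImageProcessor
# from PIL import Image
#
# processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")
# pixel_values = processor(Image.open("receipt.png"), return_tensors="pt").pixel_values
# print(pixel_values.shape)  # (1, 3, height, width) after resize/thumbnail/pad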
| 295
| 1
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0.2 , snake_case__=0.2 ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = bp_numa
lowerCAmelCase : Any = bp_numa
lowerCAmelCase : Optional[Any] = bp_numa
lowerCAmelCase : Tuple = conva_get[:2]
lowerCAmelCase : Dict = conva_get[2]
lowerCAmelCase : Optional[int] = size_pa
lowerCAmelCase : List[str] = rate_w
lowerCAmelCase : List[Any] = rate_t
lowerCAmelCase : Any = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCAmelCase : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase : List[Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowerCAmelCase : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
lowerCAmelCase : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(snake_case__ , "wb" ) as f:
pickle.dump(snake_case__ , snake_case__ )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls , snake_case__ ):
"""simple docstring"""
with open(snake_case__ , "rb" ) as f:
lowerCAmelCase : Union[str, Any] = pickle.load(snake_case__ ) # noqa: S301
lowerCAmelCase : Optional[Any] = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
lowerCAmelCase : Any = model_dic.get("size_pooling1" )
lowerCAmelCase : List[Any] = model_dic.get("num_bp1" )
lowerCAmelCase : Optional[int] = model_dic.get("num_bp2" )
lowerCAmelCase : Optional[Any] = model_dic.get("num_bp3" )
lowerCAmelCase : Tuple = model_dic.get("rate_weight" )
lowerCAmelCase : Any = model_dic.get("rate_thre" )
# create model instance
lowerCAmelCase : Any = CNN(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# modify model parameter
lowerCAmelCase : Any = model_dic.get("w_conv1" )
lowerCAmelCase : Tuple = model_dic.get("wkj" )
lowerCAmelCase : List[Any] = model_dic.get("vji" )
lowerCAmelCase : int = model_dic.get("thre_conv1" )
lowerCAmelCase : List[Any] = model_dic.get("thre_bp2" )
lowerCAmelCase : Union[str, Any] = model_dic.get("thre_bp3" )
return conv_ins
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return round(snake_case__ , 3 )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = convs[0]
lowerCAmelCase : int = convs[1]
lowerCAmelCase : Optional[Any] = np.shape(snake_case__ )[0]
# get the data slice of original image data, data_focus
lowerCAmelCase : Any = []
for i_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
for j_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
lowerCAmelCase : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(snake_case__ )
# calculate the feature map of every single kernel, and saved as list of matrix
lowerCAmelCase : Dict = []
lowerCAmelCase : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(snake_case__ ):
lowerCAmelCase : List[str] = []
for i_focus in range(len(snake_case__ ) ):
lowerCAmelCase : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(snake_case__ ) )
lowerCAmelCase : int = np.asmatrix(snake_case__ ).reshape(
snake_case__ , snake_case__ )
data_featuremap.append(snake_case__ )
# expanding the data slice to One dimenssion
lowerCAmelCase : str = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(snake_case__ ) )
lowerCAmelCase : str = np.asarray(snake_case__ )
return focus_list, data_featuremap
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__="average_pool" ):
"""simple docstring"""
lowerCAmelCase : int = len(featuremaps[0] )
lowerCAmelCase : Optional[int] = int(size_map / size_pooling )
lowerCAmelCase : str = []
for i_map in range(len(snake_case__ ) ):
lowerCAmelCase : Optional[int] = featuremaps[i_map]
lowerCAmelCase : int = []
for i_focus in range(0 , snake_case__ , snake_case__ ):
for j_focus in range(0 , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(snake_case__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(snake_case__ ) )
lowerCAmelCase : str = np.asmatrix(snake_case__ ).reshape(snake_case__ , snake_case__ )
featuremap_pooled.append(snake_case__ )
return featuremap_pooled
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = []
for i in range(len(snake_case__ ) ):
lowerCAmelCase : str = np.shape(data[i] )
lowerCAmelCase : Union[str, Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCAmelCase : Tuple = data_listed.getA().tolist()[0]
data_expanded.extend(snake_case__ )
lowerCAmelCase : List[str] = np.asarray(snake_case__ )
return data_expanded
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = np.asarray(snake_case__ )
lowerCAmelCase : Dict = np.shape(snake_case__ )
lowerCAmelCase : Tuple = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = []
lowerCAmelCase : str = 0
for i_map in range(snake_case__ ):
lowerCAmelCase : int = np.ones((size_map, size_map) )
for i in range(0 , snake_case__ , snake_case__ ):
for j in range(0 , snake_case__ , snake_case__ ):
lowerCAmelCase : str = pd_pool[
i_pool
]
lowerCAmelCase : List[str] = i_pool + 1
lowerCAmelCase : Tuple = np.multiply(
snake_case__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(snake_case__ )
return pd_all
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=bool ):
"""simple docstring"""
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(snake_case__ )) )
print((" - - Shape: Teach_Data ", np.shape(snake_case__ )) )
lowerCAmelCase : Any = 0
lowerCAmelCase : Dict = []
lowerCAmelCase : Tuple = 10_000
while rp < n_repeat and mse >= error_accuracy:
lowerCAmelCase : str = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(snake_case__ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCAmelCase : Dict = np.asmatrix(datas_train[p] )
lowerCAmelCase : str = np.asarray(datas_teach[p] )
lowerCAmelCase , lowerCAmelCase : int = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : Union[str, Any] = self.pooling(snake_case__ , self.size_poolinga )
lowerCAmelCase : Tuple = np.shape(snake_case__ )
lowerCAmelCase : Optional[int] = self._expand(snake_case__ )
lowerCAmelCase : Optional[int] = data_bp_input
lowerCAmelCase : int = np.dot(snake_case__ , self.vji.T ) - self.thre_bpa
lowerCAmelCase : Tuple = self.sig(snake_case__ )
lowerCAmelCase : Union[str, Any] = np.dot(snake_case__ , self.wkj.T ) - self.thre_bpa
lowerCAmelCase : Optional[Any] = self.sig(snake_case__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCAmelCase : Union[str, Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(snake_case__ , (1 - bp_outa) ) )
lowerCAmelCase : str = np.multiply(
np.dot(snake_case__ , self.wkj ) , np.multiply(snake_case__ , (1 - bp_outa) ) )
lowerCAmelCase : str = np.dot(snake_case__ , self.vji )
lowerCAmelCase : Optional[int] = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCAmelCase : List[Any] = pd_conva_pooled.T.getA().tolist()
lowerCAmelCase : int = self._calculate_gradient_from_pool(
snake_case__ , snake_case__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCAmelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowerCAmelCase : Optional[Any] = self.rate_weight * np.dot(snake_case__ , snake_case__ )
lowerCAmelCase : Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCAmelCase : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCAmelCase : Any = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCAmelCase : Dict = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCAmelCase : int = self.thre_bpa - pd_k_all * self.rate_thre
lowerCAmelCase : Optional[int] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCAmelCase : Optional[int] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCAmelCase : Union[str, Any] = rp + 1
lowerCAmelCase : Dict = error_count / patterns
all_mse.append(snake_case__ )
def draw_error():
lowerCAmelCase : List[str] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(snake_case__ , "+-" )
plt.plot(snake_case__ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(snake_case__ , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(snake_case__ )) )
for p in range(len(snake_case__ ) ):
lowerCAmelCase : str = np.asmatrix(datas_test[p] )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : List[str] = self.pooling(snake_case__ , self.size_poolinga )
lowerCAmelCase : Dict = self._expand(snake_case__ )
lowerCAmelCase : Union[str, Any] = data_bp_input
lowerCAmelCase : List[str] = bp_outa * self.vji.T - self.thre_bpa
lowerCAmelCase : int = self.sig(snake_case__ )
lowerCAmelCase : Tuple = bp_outa * self.wkj.T - self.thre_bpa
lowerCAmelCase : Dict = self.sig(snake_case__ )
produce_out.extend(bp_outa.getA().tolist() )
lowerCAmelCase : Union[str, Any] = [list(map(self.do_round , snake_case__ ) ) for each in produce_out]
return np.asarray(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = np.asmatrix(snake_case__ )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : str = self.pooling(snake_case__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
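# --- Usage sketch (not part of the original file) ---
# A hedged toy run: 10x10 "images", two 3x3 kernels, 2x2 pooling. The flatten
# size must match: a 10x10 input with 3x3 kernels at step 1 gives an 8x8
# feature map; pooled by 2 it is 4x4, so with 2 kernels the flattened vector
# has 2 * 4 * 4 = 32 entries, hence bp_num1=32.
#
# import numpy as np
# cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=32, bp_num2=20, bp_num3=3)
# data = [np.random.rand(10, 10) for _ in range(4)]
# teach = [np.random.rand(3) for _ in range(4)]
# cnn.train(patterns=4, datas_train=data, datas_teach=teach,
#           n_repeat=5, error_accuracy=0.1, draw_e=False)
# print(cnn.predict(data))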
| 681
|
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
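# --- Worked example (not part of the original file) ---
# 16807 = 7**5 has 5 digits, so it is counted; 10**2 = 100 has 3 digits, not 2,
# and no base >= 10 can ever qualify, which is why the base range stops below 10.
#
# assert len(str(7**5)) == 5      # counted
# assert len(str(10**2)) != 2     # excluded
# assert solution() == 49         # known Project Euler 63 answer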
| 681
| 1
|
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Run the module's accelerate `_hf_hook.pre_forward` (if any) before the
    decorated forward-like method, so offloaded weights are back on-device."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
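# --- Usage sketch (not part of the original file) ---
# A hedged example: decorate a method that needs accelerate's offloading hook
# to have run before the weights are touched. TinyModel is a made-up class.
#
# import torch
#
# class TinyModel(torch.nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.linear = torch.nn.Linear(4, 4)
#
#     @apply_forward_hook
#     def encode(self, x):
#         return self.linear(x)
#
# model = TinyModel()
# print(model.encode(torch.randn(1, 4)).shape)  # works with or without a hook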
| 443
|
from copy import deepcopy


class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and
    prefix/range sum queries in O(log n)."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
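# --- Usage sketch (not part of the original file) ---
# Point updates and prefix/range queries in O(log n):
#
# f = FenwickTree([1, 2, 3, 4, 5])
# print(f.prefix(3))      # 1 + 2 + 3 = 6   (sum of indices 0..2)
# print(f.query(1, 4))    # 2 + 3 + 4 = 9   (half-open range [1, 4))
# f.add(2, 10)            # a[2] becomes 13
# print(f.query(1, 4))    # 2 + 13 + 4 = 19
# print(f.rank_query(5))  # 1: largest i with a[0] + ... + a[i] <= 5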
| 33
| 0
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # after reducing labels, the background class is mapped to 255
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
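# --- Usage sketch (not part of the original file) ---
# A hedged example of the segmentation-map path exercised above, reusing the
# helper defined in this file: an image and its label map go in together and
# come out as aligned "pixel_values" / "labels" tensors.
#
# image_processing = BeitImageProcessor(do_reduce_labels=False)
# image, segmentation_map = prepare_semantic_single_inputs()
# encoding = image_processing(image, segmentation_map, return_tensors="pt")
# print(encoding["pixel_values"].shape, encoding["labels"].shape)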
| 698
|
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
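# --- Usage sketch (not part of the original file) ---
# A hedged accuracy check on the held-out split created above:
#
# correct = sum(
#     classifier(X_train, y_train, classes, point) == classes[label]
#     for point, label in zip(X_test, y_test)
# )
# print(f"accuracy: {correct / len(X_test):.2f}")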
| 698
| 1
|