| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
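
# Usage sketch for the exports above (the model id is the public Versatile
# Diffusion checkpoint; treat it as an assumption if your install differs):
# from diffusers import VersatileDiffusionPipeline
# pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")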
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
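
# To run just the fast tests in this module (the path is indicative of the
# diffusers repo layout, not guaranteed):
#   pytest tests/pipelines/stable_diffusion/test_cycle_diffusion.py -k "not Integration"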
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if `ip_v4_address` is a valid dotted-quad IPv4 address."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
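
# Quick sanity checks for the validator above (illustrative values only):
# is_ip_v4_address_valid("192.168.0.1")  -> True
# is_ip_v4_address_valid("256.1.1.1")    -> False (octet out of range)
# is_ip_v4_address_valid("192.168.0")    -> False (too few octets)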
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # non-letters (spaces, digits, punctuation) pass through unchanged
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
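
# A tiny sanity check, assuming the helpers above (key "B" shifts every
# letter by one position, so it is easy to verify by hand):
# encrypt_message("B", "HELLO")  -> "IFMMP"
# decrypt_message("B", "IFMMP")  -> "HELLO"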
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
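
# Minimal end-to-end sketch mirroring the integration test above (checkpoint
# ids are the public kandinsky-community repos it already references):
# pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
# image_emb, zero_image_emb = pipe_prior("a hat").to_tuple()
# pipe = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint")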
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
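
# Usage sketch (google/mt5-small is the usual public checkpoint for these
# classes; treat the id as an assumption):
# model = FlaxMT5Model.from_pretrained("google/mt5-small")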
import argparse

import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.embed_dim = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # fused qkv projections are split into separate query/key/value tensors
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
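
# Example invocation (script name is indicative; the flags are defined above):
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64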
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
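
# Cross-check against Python's built-in bitwise AND (illustrative; note the
# helper zero-pads to a common width, so only the numeric value matches):
# binary_and(3, 5)  -> "0b001"
# bin(3 & 5)        -> "0b1"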
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
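
# Example invocation once installed as a console script (the `env` command is
# registered above):
#   datasets-cli env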
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
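
# Standalone usage sketch (inside diffusers a pipeline normally drives these
# calls; the defaults mirror the @register_to_config signature above):
# scheduler = KDPM2DiscreteScheduler()
# scheduler.set_timesteps(num_inference_steps=20, device="cpu")
# print(len(scheduler.timesteps))  # interleaved 1st/2nd-order steps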
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.isqrt, which is exact
    for integers (a float math.sqrt comparison can fail for large inputs)."""
    return num >= 0 and math.isqrt(num) ** 2 == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search: O(log n)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
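
# Both helpers agree on small inputs (illustrative):
# perfect_square(16)               -> True
# perfect_square_binary_search(16) -> True
# perfect_square_binary_search(15) -> False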
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
def bead_sort(sequence: list) -> list:
    """Bead sort ("gravity sort"); only works for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
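
# Note: this list-based bead sort is O(n^2) and rejects anything that is not
# a non-negative integer, e.g. bead_sort([-1, 2]) raises TypeError.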
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_a = object()
# For specifying empty leaf dict `{}`
_a = object()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ):
UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )]
if matches and all(__lowerCamelCase ):
return True
return False
def __a ( __lowerCamelCase ):
def replace(__lowerCamelCase, __lowerCamelCase ):
for rule, replacement in rules:
if _match(__lowerCamelCase, __lowerCamelCase ):
return replacement
return val
return replace
def __a ( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )),
(("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )),
(("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = _get_partition_rules()
UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase )
UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )}
UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCamelCase ) )
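
# How the matcher behaves on a flattened GPT-style parameter path
# (illustrative key, not from the original file):
# _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))  -> True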
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Compute the Schur complement S = C - B^T A^{-1} B of the block matrix
    [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        # det([[A, B], [B^T, C]]) == det(A) * det(S)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
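
# A hand-checkable instance of the identity exercised above, with
# hypothetical matrices: for A = I_2, B = 0, C = [[1.]],
# S = C - B^T A^{-1} B = [[1.]] and det(block) == det(A) * det(S).
# a, b, c = np.eye(2), np.zeros((2, 1)), np.ones((1, 1))
# schur_complement(a, b, c)  -> array([[1.]])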
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
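# A minimal composition sketch (OPT is only the default text backbone; any config
# registered in CONFIG_MAPPING can be used instead):
#
#   from transformers import OPTConfig
#
#   config = InstructBlipConfig.from_vision_qformer_text_configs(
#       InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig()
#   )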
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__a = None
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
__a = {
'facebook/mbart-large-en-ro': 1_0_2_4,
'facebook/mbart-large-cc25': 1_0_2_4,
}
# fmt: off
__a = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowercase__( _UpperCAmelCase ):
"""simple docstring"""
a :Dict = VOCAB_FILES_NAMES
a :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a :str = PRETRAINED_VOCAB_FILES_MAP
a :int = ['input_ids', 'attention_mask']
a :Tuple = MBartTokenizer
a :List[int] = []
a :List[int] = []
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Any="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : List[str]="<s>" , SCREAMING_SNAKE_CASE_ : List[str]="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<pad>" , SCREAMING_SNAKE_CASE_ : List[Any]="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE_ : int , ) -> str:
lowercase_ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
vocab_file=a__ , tokenizer_file=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , src_lang=a__ , tgt_lang=a__ , additional_special_tokens=a__ , **a__ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
lowercase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
lowercase_ = {
lang_code: self.convert_tokens_to_ids(a__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowercase_ = src_lang if src_lang is not None else '''en_XX'''
lowercase_ = self.convert_tokens_to_ids(self._src_lang )
lowercase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self : Optional[int] ) -> Dict:
return self._src_lang
@src_lang.setter
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]:
lowercase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> Tuple:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> Tuple:
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] , SCREAMING_SNAKE_CASE_ : Optional[str] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ = src_lang
lowercase_ = self(a__ , add_special_tokens=a__ , return_tensors=a__ , **a__ )
lowercase_ = self.convert_tokens_to_ids(a__ )
lowercase_ = tgt_lang_id
return inputs
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str = "en_XX" , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE_ : str = "ro_RO" , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> Dict:
lowercase_ = src_lang
lowercase_ = tgt_lang
return super().prepare_seqaseq_batch(a__ , a__ , **a__ )
def _lowercase ( self : str ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self : List[Any] ) -> Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Tuple:
lowercase_ = self.convert_tokens_to_ids(a__ )
lowercase_ = []
lowercase_ = [self.eos_token_id, self.cur_lang_code]
lowercase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str ) -> Any:
lowercase_ = self.convert_tokens_to_ids(a__ )
lowercase_ = []
lowercase_ = [self.eos_token_id, self.cur_lang_code]
lowercase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> str:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
lowercase_ = os.path.join(
a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
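# A minimal usage sketch (the class above corresponds to `MBartTokenizerFast` in
# transformers; checkpoint name taken from PRETRAINED_VOCAB_FILES_MAP, example
# sentence illustrative):
#
#   tokenizer = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")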
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
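# A minimal usage sketch (assumes the hosted "facebook/flava-full" checkpoint and a
# PIL image bound to `image`):
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")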
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : List[str] , a_ : Any , a_ : Any=None , a_ : int=None , a_ : str=None , a_ : Optional[int]="resnet50" , a_ : str=3 , a_ : str=32 , a_ : Union[str, Any]=3 , a_ : Tuple=True , a_ : List[str]=True , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Dict = out_indices if out_indices is not None else [4]
lowerCAmelCase_ : int = stage_names
lowerCAmelCase_ : Optional[Any] = out_features
lowerCAmelCase_ : Tuple = backbone
lowerCAmelCase_ : List[str] = batch_size
lowerCAmelCase_ : Tuple = image_size
lowerCAmelCase_ : List[Any] = num_channels
lowerCAmelCase_ : Optional[int] = use_pretrained_backbone
lowerCAmelCase_ : List[Any] = is_training
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : Dict ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCamelCase ( self : Union[str, Any] , a_ : str , a_ : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] = TimmBackbone(config=a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : int = model(a_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = config_and_inputs
lowerCAmelCase_ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __lowerCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
a_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
a_ : int = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
a_ : Union[str, Any] = False
a_ : str = False
a_ : List[Any] = False
a_ : Dict = False
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = TimmBackboneModelTester(self )
lowerCAmelCase_ : List[str] = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def lowerCamelCase ( self : Dict ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = "resnet18"
lowerCAmelCase_ : List[Any] = "microsoft/resnet-18"
lowerCAmelCase_ : Tuple = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ )
lowerCAmelCase_ : str = AutoBackbone.from_pretrained(a_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase_ : Dict = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ , out_indices=[1, 2, 3] )
lowerCAmelCase_ : Any = AutoBackbone.from_pretrained(a_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowerCamelCase ( self : Optional[int] ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowerCamelCase ( self : List[Any] ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCamelCase ( self : Any ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowerCamelCase ( self : Tuple ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : str ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCamelCase ( self : Any ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCamelCase ( self : List[str] ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : Tuple ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : Optional[int] ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowerCamelCase ( self : int ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowerCamelCase ( self : Union[str, Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase ( self : Union[str, Any] ):
pass
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(a_ )
lowerCAmelCase_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : List[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase_ : int = self.all_model_classes[0]
lowerCAmelCase_ : Optional[int] = model_class(a_ )
model.to(a_ )
lowerCAmelCase_ : Union[str, Any] = self._prepare_for_class(a_ , a_ )
lowerCAmelCase_ : str = model(**a_ )
lowerCAmelCase_ : Any = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase_ : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase_ : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCamelCase ( self : str ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Dict = model_class(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Tuple = model(**a_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase_ : Optional[int] = copy.deepcopy(a_ )
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : Any = model_class(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : int = model(**a_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase_ : str = copy.deepcopy(a_ )
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Optional[int] = model_class(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(**a_ )
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns) -> str:
    """Apply each (tf_name, hf_name) replacement pattern, in order, to a TF variable name."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
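# Illustrative rename (the key is hypothetical; patterns are applied left to right):
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"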
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map a dict of TF checkpoint variables onto a freshly initialized HF BigBirdPegasus model."""
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    """Load all TF checkpoint variables (except ignored ones) into a dict of numpy arrays."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
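# Example invocation (the script filename and paths are placeholders):
#   python convert_bigbird_pegasus_ckpt.py --tf_ckpt_path /path/to/tf_ckpt --save_dir ./bigbird-pegasus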
"""simple docstring"""
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) using a single row of Pascal's triangle in O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
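# For example, C(10, 5) = 10! / (5! * 5!) = 252; the print below exercises exactly that case.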
print(binomial_coefficient(n=10, r=5))
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-based position of the highest set bit of `number`
    (equivalently, its bit length); 0 is returned for the input 0.
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1
    return position
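# For example, 32 == 0b100000, so the function returns 6 (the same value as (32).bit_length()).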
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : str = tempfile.mkdtemp()
lowerCAmelCase__ : List[Any] = 8
# DPR tok
lowerCAmelCase__ : int = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" )
os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase )
lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase__ : str = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowerCAmelCase__ : List[Any] = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowerCAmelCase__ : Any = {"""unk_token""": """<unk>"""}
lowerCAmelCase__ : str = os.path.join(self.tmpdirname ,"""bart_tokenizer""" )
os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase )
lowerCAmelCase__ : Any = os.path.join(__UpperCAmelCase ,BART_VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ : Dict = os.path.join(__UpperCAmelCase ,BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) )
def UpperCAmelCase_ ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) )
def UpperCAmelCase_ ( self ) -> Any:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Any = os.path.join(self.tmpdirname ,"""rag_tokenizer""" )
lowerCAmelCase__ : Any = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() )
lowerCAmelCase__ : str = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__UpperCAmelCase )
rag_tokenizer.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ : Any = RagTokenizer.from_pretrained(__UpperCAmelCase ,config=__UpperCAmelCase )
self.assertIsInstance(new_rag_tokenizer.question_encoder ,__UpperCAmelCase )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator ,__UpperCAmelCase )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ : List[str] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
lowerCAmelCase__ : Optional[Any] = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
lowerCAmelCase__ : Dict = tokenizer(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ : str = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
lowerCAmelCase__ : str = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
lowerCAmelCase__ : Tuple = tokenizer(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort the list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
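# For example, exchange_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5]. Like bubble
# sort, the pairwise-exchange strategy takes O(n^2) comparisons in the worst case.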
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowerCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , ):
"""simple docstring"""
A__ = bnb_quantization_config.load_in_abit
A__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
A__ = []
# custom device map
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(device_map.keys() ) > 1:
A__ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A__ = get_keys_to_not_convert(UpperCamelCase__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(UpperCamelCase__ )
A__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A__ = []
A__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(UpperCamelCase__ )
# compatibility with peft
A__ = load_in_abit
A__ = load_in_abit
A__ = get_parameter_device(UpperCamelCase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
A__ = replace_with_bnb_layers(UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
# convert param to the right dtype
A__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A__ = name.replace('.weight' , '' ).replace('.bias' , '' )
A__ = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(UpperCamelCase__ ):
param.to(UpperCamelCase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
A__ = replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
A__ = get_quantized_model_device_map(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , max_memory=UpperCamelCase__ , no_split_module_classes=UpperCamelCase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A__ = True
A__ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=UpperCamelCase__ , offload_state_dict=UpperCamelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(UpperCamelCase__ , device_map=UpperCamelCase__ , offload_dir=UpperCamelCase__ )
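# A minimal usage sketch (the function above appears to mirror
# `accelerate.utils.load_and_quantize_model`; the model class and weights path
# below are hypothetical):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical architecture
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="/path/to/weights", device_map="auto"
#   )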
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    """simple docstring"""
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n "
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
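# For reference, the device_map returned above is a plain dict mapping module names to
# devices, e.g. (illustrative only) {"transformer.h.0": 0, "transformer.h.1": 1, "lm_head": "cpu"}.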
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """simple docstring"""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """simple docstring"""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
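# A minimal sketch of what the recursion above does (assuming bitsandbytes is importable
# as `bnb` and a GPU is available): every eligible nn.Linear is swapped in place.
#
#     model = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
#     model = replace_with_bnb_layers(model, bnb_quantization_config)
#     # type(model[0]) is now bnb.nn.Linear8bitLt (or bnb.nn.Linear4bit for 4-bit configs)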
def get_keys_to_not_convert(model):
    """simple docstring"""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
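# Example: for a typical causal LM, the helper above returns the output head together
# with any modules tied to it, e.g. ["lm_head"], so they are kept out of quantization.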
def has_4bit_bnb_layers(model):
    """simple docstring"""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    """simple docstring"""
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """simple docstring"""
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
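# Note: "SCB" above is the per-row int8 scaling statistics buffer that bitsandbytes
# attaches to 8-bit weights; it is offloaded next to the weight so the quantized
# parameter can be fully reconstructed when it is reloaded from disk.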
| 221
| 0
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
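# Quick sanity check on a 3-4-5 right triangle; both implementations agree.
assert euclidean_distance((0, 0), (3, 4)) == 5.0
assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0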
if __name__ == "__main__":
    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
benchmark()
| 361
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
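# The check above is the usual KV-cache equivalence test: decoding three extra tokens
# with `past_key_values` must match a full forward pass over the concatenated inputs
# on a randomly selected output slice.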
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 251
| 0
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :int , lowerCamelCase :List[str] ) -> Union[str, Any]:
if format == DecodeType.CHARACTER:
UpperCAmelCase__ = self.char_decode
UpperCAmelCase__ = 1
UpperCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
UpperCAmelCase__ = self.bpe_decode
UpperCAmelCase__ = 2
UpperCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
UpperCAmelCase__ = self.wp_decode
UpperCAmelCase__ = 102
UpperCAmelCase__ = "[SEP]"
else:
raise ValueError(f'''Format {format} is not supported.''' )
UpperCAmelCase__ , UpperCAmelCase__ = [], []
UpperCAmelCase__ = pred_logits.size(0 )
UpperCAmelCase__ = pred_logits.size(1 )
UpperCAmelCase__ , UpperCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase , sorted=lowerCamelCase )
UpperCAmelCase__ = preds_index.view(-1 , lowerCamelCase )[:, 1:]
UpperCAmelCase__ = decoder(lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ = torch.nn.functional.softmax(lowerCamelCase , dim=2 ).max(dim=2 )
UpperCAmelCase__ = preds_max_prob[:, 1:]
for index in range(lowerCamelCase ):
UpperCAmelCase__ = preds_str[index].find(lowerCamelCase )
UpperCAmelCase__ = preds_str[index][:pred_eos]
UpperCAmelCase__ = preds_index[index].cpu().tolist()
UpperCAmelCase__ = pred_index.index(lowerCamelCase ) if eos_token in pred_index else -1
UpperCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
UpperCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCamelCase )
conf_scores.append(lowerCamelCase )
return dec_strs, conf_scores
def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :int ) -> List[str]:
UpperCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(lowerCamelCase )]
return decode_strs
def UpperCAmelCase_ ( self :Optional[Any] , lowerCamelCase :Dict ) -> Dict:
return self.bpe_tokenizer.batch_decode(lowerCamelCase )
def UpperCAmelCase_ ( self :str , lowerCamelCase :str ) -> Tuple:
UpperCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(lowerCamelCase )]
return decode_strs
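# Hedged usage sketch for the processor above (checkpoint id and surrounding model are
# illustrative assumptions, not confirmed by this file):
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)  # a model returning (char, bpe, wp) logits
#     text = processor.batch_decode(outputs.logits)["generated_text"]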
| 169
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 169
| 1
|
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple via the gcd."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
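# Sanity check from the Project Euler 5 statement: 2520 is the smallest number
# evenly divisible by every integer from 1 to 10.
assert solution(10) == 2520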
if __name__ == "__main__":
print(f"""{solution() = }""")
| 364
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
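# Example of one application (hypothetical key): set_recursively(hf_model,
# "encoder.layers.0.attention.k_proj", value, full_name, "weight") walks attribute by
# attribute down to the k_proj module and assigns `value` to its .weight.data.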
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """simple docstring"""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
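# For context: fairseq stores `conv_feature_layers` as a string literal such as
# "[(512, 10, 5)] + [(512, 3, 2)] * 4" (illustrative only), which is why eval() is used
# above; each (dim, kernel, stride) tuple becomes one conv layer in the config.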
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 13
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : str = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105
|
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunchTest(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
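# For the successful argument list above, the conversion yields typed values, roughly:
# {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#  "learning_rate": 5e-5, "max_steps": 50.5}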
| 105
| 1
|
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 173: count square laminae using up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
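# Hedged sanity check, from the Project Euler 173 statement: using up to one hundred
# tiles, exactly forty-one different square laminae can be formed.
assert solution(100) == 41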
if __name__ == "__main__":
print(f'''{solution() = }''')
| 357
|
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True iff num1 and num2 have opposite signs: in two's complement, the sign
    bit of num1 ^ num2 is set exactly when the operands' sign bits differ.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345
| 0
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """simple docstring"""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace("　", "<SP>")
        text = text.replace(" ", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
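# Hedged usage sketch for the tokenizer above (file paths are illustrative):
#
#     tokenizer = GPTNeoXJapaneseTokenizer("vocab.txt", "emoji.json")
#     ids = tokenizer("こんにちは 世界")["input_ids"]
#     print(tokenizer.decode(ids))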
| 106
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : str ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case , cache_dir=__snake_case )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[str] ) -> Dict:
UpperCAmelCase , UpperCAmelCase : str = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case )
UpperCAmelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Tuple = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[Any] = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
UpperCAmelCase : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__snake_case ) == num_samples
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : Any = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : int = num_samples * [prompt]
UpperCAmelCase : Union[str, Any] = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Dict = replicate(__snake_case )
UpperCAmelCase : int = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Tuple = shard(__snake_case )
UpperCAmelCase : Tuple = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
def A ( self : int ) -> Dict:
UpperCAmelCase , UpperCAmelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Union[str, Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : List[str] = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[Any] = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : List[Any] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
def A ( self : int ) -> Any:
UpperCAmelCase , UpperCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 )
UpperCAmelCase : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[int] = jax.device_count()
UpperCAmelCase : List[str] = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : str = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
def A ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : int = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__snake_case , steps_offset=1 , )
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , scheduler=__snake_case , safety_checker=__snake_case , )
UpperCAmelCase : Tuple = scheduler.create_state()
UpperCAmelCase : Dict = scheduler_state
UpperCAmelCase : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : int = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Any = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : str = replicate(__snake_case )
UpperCAmelCase : List[str] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Dict = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None,
            use_memory_efficient_attention=True, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
def binomial_coefficient(n, k):
    """simple docstring"""
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count):
    """simple docstring"""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n):
    """simple docstring"""
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count):
    """simple docstring"""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
    if node_count <= 0:
        raise ValueError('We need some nodes to work with.')
    print(
        f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
        f"""binary trees and {catalan_number(node_count)} binary search trees."""
    )
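# Minimal sanity-check sketch for the helpers above (the expected values are
# standard combinatorial facts: C(4, 2) = 6, the 3rd Catalan number is 5, and
# 5 * 3! = 30 labelled binary trees on 3 nodes):
#
#     >>> binomial_coefficient(4, 2)
#     6
#     >>> catalan_number(3)
#     5
#     >>> binary_tree_count(3)
#     30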
def power(base, exponent):
    """simple docstring"""
    return base * power(base, exponent - 1) if exponent else 1
if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"""{base} to the power of {exponent} is {result}""")
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """simple docstring"""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """simple docstring"""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """simple docstring"""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has an Euler path')
    if check == 1:
        print('graph has an Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    """simple docstring"""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degrees are zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
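# Worked example (a sketch using a triangle graph, where every vertex has even
# degree, so the traversal is an Euler cycle):
#
#     >>> check_euler({1: [2, 3], 2: [1, 3], 3: [1, 2]}, 10)
#     graph has an Euler cycle
#     [1, 2, 3, 1]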
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
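# Sketch of what the helper yields (assuming the cosine transform above): each
# beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta), so for
# T = 2 it returns a length-2 float32 tensor of per-step betas.
#
#     >>> betas_for_alpha_bar(2).shape
#     torch.Size([2])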
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, prediction_type: str = "epsilon", use_karras_sigmas: Optional[bool] = False, clip_sample: Optional[bool] = False, clip_sample_range: float = 1.0, timestep_spacing: str = "linspace", steps_offset: int = 0, ) -> None:
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='cosine')
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='exp')
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None) -> None:
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith('mps'):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
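    # The ramp above is the Karras et al. (2022) noise schedule: sigma_i =
    # (sigma_max^(1/rho) + (i / (n - 1)) * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho,
    # i.e. a linear interpolation in sigma^(1/rho) space from sigma_max down to
    # sigma_min; with rho = 7 the steps cluster toward the low-noise end.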
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`''')
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self: List[Any] ) -> Union[str, Any]:
return self.config.num_train_timesteps
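# Minimal usage sketch for this scheduler (the `unet` call and tensor shapes are
# illustrative assumptions, not part of this file):
#
#     scheduler = HeunDiscreteScheduler(beta_schedule="scaled_linear")
#     scheduler.set_timesteps(50, device="cuda")
#     sample = torch.randn(1, 4, 64, 64, device="cuda") * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t).sample  # hypothetical denoising model
#         sample = scheduler.step(noise_pred, t, sample).prev_sample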
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
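# Worked example (two equal resistors: parallel halves the value, series sums it):
#
#     >>> resistor_parallel([3.21938, 3.21938])
#     1.60969
#     >>> resistor_series([3.21938, 3.21938])
#     6.43876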
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        """negative_prompt""",
        """height""",
        """width""",
        """negative_prompt_embeds""",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'An astronaut riding an elephant',
            'source_prompt': 'An astronaut riding a horse',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'eta': 0.1,
            'strength': 0.8,
            'guidance_scale': 3,
            'source_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, 'half'):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()
    @unittest.skip('non-deterministic pipeline')
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()
    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()
    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy')
        init_image = init_image.resize((512, 512))
        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision='fp16')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'A black colored car'
        source_prompt = 'A blue colored car'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='np', )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy')
        init_image = init_image.resize((512, 512))
        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'A black colored car'
        source_prompt = 'A blue colored car'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='np', )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowerCamelCase : Dict = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f'''Pushing converted {model_name} to the hub...''')
        hub_model_name = f'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
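# Example invocation (a sketch; the script filename and output path below are
# placeholders, and the flags match the argparse definitions above):
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model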
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class TatoebaConversionTester(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])
    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = 'yolos'
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
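# Minimal export sketch using the ONNX config above (the `export` entry point and
# its argument order are assumptions based on transformers' ONNX utilities):
#
#     from pathlib import Path
#     from transformers.onnx import export  # assumed API
#     onnx_config = YolosOnnxConfig(config)
#     export(preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path("yolos.onnx"))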
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    '''simple docstring'''
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    '''simple docstring'''
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    '''simple docstring'''
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    '''simple docstring'''
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f'''{solution() = }''')
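# Why the XOR decryption in try_key works: XOR is its own inverse, so applying
# the same key byte twice restores the plaintext byte.
#
#     >>> ord("h") ^ ord("X")
#     48
#     >>> 48 ^ ord("X")
#     104
#     >>> chr(104)
#     'h'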
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = """mgp-str"""
    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
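# Minimal usage sketch (the printed values are just the signature defaults above,
# not outputs verified against a released checkpoint):
#
#     >>> config = MgpstrConfig()
#     >>> (config.max_token_length, config.num_character_labels)
#     (27, 38)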
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        """simple docstring"""
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])
    def test_basic_tokenizer_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])
    def test_basic_tokenizer_no_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
    def test_wordpiece_tokenizer(self):
        """simple docstring"""
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
    def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(' '))
self.assertTrue(_is_whitespace('\t'))
self.assertTrue(_is_whitespace('\r'))
self.assertTrue(_is_whitespace('\n'))
self.assertTrue(_is_whitespace('\u00A0'))
self.assertFalse(_is_whitespace('A'))
self.assertFalse(_is_whitespace('-'))
    def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control('\u0005'))
self.assertFalse(_is_control('A'))
self.assertFalse(_is_control(' '))
self.assertFalse(_is_control('\t'))
self.assertFalse(_is_control('\r'))
    def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation('-'))
self.assertTrue(_is_punctuation('$'))
self.assertTrue(_is_punctuation('`'))
self.assertTrue(_is_punctuation('.'))
self.assertFalse(_is_punctuation('A'))
self.assertFalse(_is_punctuation(' '))
    def test_clean_text(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
    @slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('google/mobilebert-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'])
    def test_change_tokenize_chinese_chars(self):
        """simple docstring"""
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
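# --- Illustrative sketch (added; not part of the module above) ---
# Hedged example of the `_LazyModule` effect: heavy submodules are imported only
# when an attribute is first accessed, so `import transformers` itself stays cheap.
# Assumes the released `transformers` package with torch installed.
import transformers

model_cls = transformers.CLIPModel  # this attribute access triggers the real import
print(model_cls.__module__)         # -> transformers.models.clip.modeling_clip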
| 290
| 1
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=13 , A=32 , A=2 , A=3 , A=16 , A=[32, 64, 1_28] , A=[1, 2, 1] , A=[2, 2, 4] , A=2 , A=2.0 , A=True , A=0.0 , A=0.0 , A=0.1 , A="gelu" , A=False , A=True , A=0.02 , A=1e-5 , A=True , A=None , A=True , A=10 , A=8 , A=["stage1", "stage2"] , A=[1, 2] , ) -> List[str]:
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = image_size
lowerCamelCase = patch_size
lowerCamelCase = num_channels
lowerCamelCase = embed_dim
lowerCamelCase = hidden_sizes
lowerCamelCase = depths
lowerCamelCase = num_heads
lowerCamelCase = window_size
lowerCamelCase = mlp_ratio
lowerCamelCase = qkv_bias
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = drop_path_rate
lowerCamelCase = hidden_act
lowerCamelCase = use_absolute_embeddings
lowerCamelCase = patch_norm
lowerCamelCase = layer_norm_eps
lowerCamelCase = initializer_range
lowerCamelCase = is_training
lowerCamelCase = scope
lowerCamelCase = use_labels
lowerCamelCase = type_sequence_label_size
lowerCamelCase = encoder_stride
lowerCamelCase = out_features
lowerCamelCase = out_indices
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> List[Any]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __A ( self , A , A , A ) -> Any:
'''simple docstring'''
lowerCamelCase = FocalNetModel(config=A )
model.to(A )
model.eval()
lowerCamelCase = model(A )
lowerCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = FocalNetBackbone(config=A )
model.to(A )
model.eval()
lowerCamelCase = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCamelCase = None
lowerCamelCase = FocalNetBackbone(config=A )
model.to(A )
model.eval()
lowerCamelCase = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = FocalNetForMaskedImageModeling(config=A )
model.to(A )
model.eval()
lowerCamelCase = model(A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase = 1
lowerCamelCase = FocalNetForMaskedImageModeling(A )
model.to(A )
model.eval()
lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase = model(A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = self.type_sequence_label_size
lowerCamelCase = FocalNetForImageClassification(A )
model.to(A )
model.eval()
lowerCamelCase = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase = 1
lowerCamelCase = FocalNetForImageClassification(A )
model.to(A )
model.eval()
lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase : int = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : List[str] = False
UpperCamelCase : List[Any] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : str = False
UpperCamelCase : List[str] = False
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = FocalNetModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=A , embed_dim=37 , has_text_modality=A )
def __A ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ) -> List[str]:
'''simple docstring'''
return
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A )
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase = model_class(A )
lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase = [*signature.parameters.keys()]
lowerCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def __A ( self , A , A , A , A ) -> str:
'''simple docstring'''
lowerCamelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase = model(**self._prepare_for_class(A , A ) )
lowerCamelCase = outputs.hidden_states
lowerCamelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A ) , A )
# FocalNet has a different seq_length
lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(A ) , A )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = reshaped_hidden_states[0].shape
lowerCamelCase = (
reshaped_hidden_states[0].view(A , A , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase = True
self.check_hidden_states_output(A , A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase = True
self.check_hidden_states_output(A , A , A , A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width) )
@slow
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = FocalNetModel.from_pretrained(A )
self.assertIsNotNone(A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = _config_zero_init(A )
for model_class in self.all_model_classes:
lowerCamelCase = model_class(config=A )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(A )
lowerCamelCase = self.default_image_processor
lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
lowerCamelCase = model(**A )
# verify the logits
lowerCamelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , A )
lowerCamelCase = torch.tensor([0.2166, -0.4368, 0.2191] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class __lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase : Dict = FocalNetConfig
UpperCamelCase : Optional[Any] = False
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = FocalNetModelTester(self )
| 252
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase : Optional[Any] = 16
UpperCAmelCase : Optional[Any] = 32
def __lowerCamelCase ( lowerCamelCase__ : List[str] ):
'''simple docstring'''
return int(x / 2**20 )
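# Worked example (added comment): bamb(5 * 2**20) == 5 -- the helper converts a raw
# byte count reported by torch.cuda into whole mebibytes (MiB).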
class __lowercase :
"""simple docstring"""
def __enter__( self ) -> Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCamelCase = torch.cuda.memory_allocated()
return self
def __exit__( self , *A ) -> int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
lowerCamelCase = torch.cuda.memory_allocated()
lowerCamelCase = torch.cuda.max_memory_allocated()
lowerCamelCase = bamb(self.end - self.begin )
lowerCamelCase = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCamelCase ( lowerCamelCase__ : Accelerator , lowerCamelCase__ : int = 16 , lowerCamelCase__ : str = "bert-base-cased" , lowerCamelCase__ : int = 320 , lowerCamelCase__ : int = 160 , ):
'''simple docstring'''
lowerCamelCase = AutoTokenizer.from_pretrained(lowerCamelCase__ )
lowerCamelCase = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": f'train[:{n_train}]', """validation""": f'validation[:{n_val}]'} )
def tokenize_function(lowerCamelCase__ : str ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCamelCase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
def __lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple ):
'''simple docstring'''
lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase = config["""lr"""]
lowerCamelCase = int(config["""num_epochs"""] )
lowerCamelCase = int(config["""seed"""] )
lowerCamelCase = int(config["""batch_size"""] )
lowerCamelCase = args.model_name_or_path
set_seed(lowerCamelCase__ )
lowerCamelCase , lowerCamelCase = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ , return_dict=lowerCamelCase__ )
# Instantiate optimizer
lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase = optimizer_cls(params=model.parameters() , lr=lowerCamelCase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCamelCase = 1
lowerCamelCase = (len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=0 , num_training_steps=lowerCamelCase__ , )
else:
lowerCamelCase = DummyScheduler(lowerCamelCase__ , total_num_steps=lowerCamelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase = 0
    # We also need to keep track of the starting epoch so files are named properly
lowerCamelCase = 0
# Now we train the model
lowerCamelCase = {}
for epoch in range(lowerCamelCase__ , lowerCamelCase__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowerCamelCase__ ):
lowerCamelCase = model(**lowerCamelCase__ )
lowerCamelCase = outputs.loss
lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCamelCase = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowerCamelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase__ , )
parser.add_argument(
"""--output_dir""" , type=lowerCamelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=lowerCamelCase__ , default=320 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=lowerCamelCase__ , default=160 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCamelCase__ , default=1 , help="""Number of train epochs.""" , )
lowerCamelCase = parser.parse_args()
lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
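# Example invocation (added; the script filename is an assumption, the flags are the
# ones defined by the argument parser above):
#   accelerate launch peak_memory_usage.py --model_name_or_path bert-base-cased --n_train 320 --n_val 160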
| 252
| 1
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowerCAmelCase : List[str] = Lock()
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(SCREAMING_SNAKE_CASE__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_UpperCAmelCase : Optional[int] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_UpperCAmelCase : Tuple = min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(SCREAMING_SNAKE_CASE__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_UpperCAmelCase : int = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_UpperCAmelCase : Optional[Any] = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(SCREAMING_SNAKE_CASE__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : str = []
_UpperCAmelCase : List[Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_UpperCAmelCase : Union[str, Any] = Pipe()
_UpperCAmelCase : str = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_UpperCAmelCase : Optional[Any] = temp_rs
_UpperCAmelCase : str = temp_rr
for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) - 1 ):
_UpperCAmelCase : Optional[int] = Pipe()
_UpperCAmelCase : str = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_UpperCAmelCase : Tuple = temp_rs
_UpperCAmelCase : List[Any] = temp_rr
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE__ , args=(
len(SCREAMING_SNAKE_CASE__ ) - 1,
arr[len(SCREAMING_SNAKE_CASE__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(SCREAMING_SNAKE_CASE__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(SCREAMING_SNAKE_CASE__ ) ):
_UpperCAmelCase : Tuple = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __snake_case ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : str = odd_even_transposition(SCREAMING_SNAKE_CASE__ )
print("Sorted List\n" )
print(*SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
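# --- Illustrative sketch (added; not part of the parallel version above) ---
# A serial odd-even transposition sort for comparison: the same n rounds of
# alternating compare-and-swap passes, without processes, pipes, or locks.
def odd_even_transposition_serial(arr: list[int]) -> list[int]:
    n = len(arr)
    for phase in range(n):
        start = phase % 2  # even phases compare (0,1),(2,3)...; odd phases (1,2),(3,4)...
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_serial(list(range(10, 0, -1))) == list(range(1, 11))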
| 202
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_lowerCAmelCase : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
_lowerCAmelCase : Tuple = spec.loader.load_module()
_lowerCAmelCase : Tuple = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_lowerCAmelCase : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_lowerCAmelCase : Optional[int] = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def __snake_case ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = []
for config_class in list(CONFIG_MAPPING.values() ):
_UpperCAmelCase : Union[str, Any] = False
# source code of `config_class`
_UpperCAmelCase : Optional[int] = inspect.getsource(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : List[Any] = _re_checkpoint.findall(SCREAMING_SNAKE_CASE__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_UpperCAmelCase , _UpperCAmelCase : List[Any] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_UpperCAmelCase : Optional[Any] = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
_UpperCAmelCase : Optional[Any] = True
break
_UpperCAmelCase : int = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
_UpperCAmelCase : List[str] = "\n".join(sorted(SCREAMING_SNAKE_CASE__ ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
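# --- Illustrative sketch (added; not part of the check above) ---
# What the checkpoint regex extracts from a typical config docstring fragment
# (the pattern is restated here so the sketch is self-contained):
demo_pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
demo_docstring = "similar to the [bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture."
print(demo_pattern.findall(demo_docstring))
# -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]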
| 202
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE :List[Any] = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Dict = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15
|
import math
import sys
def A_ ( _UpperCAmelCase ):
if number != int(_UpperCAmelCase ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
SCREAMING_SNAKE_CASE_: List[str] = [-1] * (number + 1)
SCREAMING_SNAKE_CASE_: str = 0
for i in range(1 , number + 1 ):
SCREAMING_SNAKE_CASE_: str = sys.maxsize
SCREAMING_SNAKE_CASE_: List[Any] = int(math.sqrt(_UpperCAmelCase ) )
for j in range(1 , root + 1 ):
SCREAMING_SNAKE_CASE_: List[str] = 1 + answers[i - (j**2)]
SCREAMING_SNAKE_CASE_: Optional[Any] = min(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
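# --- Illustrative sketch (added; not part of the solution above) ---
# Worked examples of the DP (by Lagrange's four-square theorem the answer is <= 4):
#   12 = 4 + 4 + 4 -> 3 squares;  13 = 4 + 9 -> 2 squares;  16 = 16 -> 1 square
assert A_(12) == 3
assert A_(13) == 2
assert A_(16) == 1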
| 13
| 0
|
import sys
from collections import defaultdict
class __a:
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = []
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return self.node_position[vertex]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = pos
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCAmelCase_ : Dict = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCAmelCase_ : List[str] = 2 * start + 1
else:
UpperCAmelCase_ : str = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCAmelCase_, UpperCAmelCase_ : int = heap[smallest_child], positions[smallest_child]
UpperCAmelCase_, UpperCAmelCase_ : Any = (
heap[start],
positions[start],
)
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = temp, tempa
UpperCAmelCase_ : Union[str, Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,_SCREAMING_SNAKE_CASE )
self.top_to_bottom(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : str = position[index]
while index != 0:
UpperCAmelCase_ : Dict = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCAmelCase_ : int = heap[parent]
UpperCAmelCase_ : Union[str, Any] = position[parent]
self.set_position(position[parent] ,_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : List[str] = val
UpperCAmelCase_ : int = temp
self.set_position(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
break
UpperCAmelCase_ : int = parent
else:
UpperCAmelCase_ : Optional[int] = val
UpperCAmelCase_ : Any = temp
self.set_position(_SCREAMING_SNAKE_CASE ,0 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE ) // 2 - 1
for i in range(_SCREAMING_SNAKE_CASE ,-1 ,-1 ):
self.top_to_bottom(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ : Optional[Any] = positions[0]
UpperCAmelCase_ : Optional[Any] = sys.maxsize
self.top_to_bottom(_SCREAMING_SNAKE_CASE ,0 ,len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
return temp
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = Heap()
UpperCAmelCase_ : Optional[int] = [0] * len(_lowercase )
UpperCAmelCase_ : Optional[int] = [-1] * len(_lowercase ) # Neighboring Tree Vertex of selected vertex
    # minimum distance from each vertex to its nearest neighbour in the partial
    # tree formed so far
UpperCAmelCase_ : Dict = [] # Heap of Distance of vertices from their neighboring vertex
UpperCAmelCase_ : List[str] = []
for vertex in range(len(_lowercase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowercase )
heap.node_position.append(_lowercase )
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[Any] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : str = distance
heap.heapify(_lowercase , _lowercase )
for _ in range(1 , len(_lowercase ) ):
UpperCAmelCase_ : Optional[Any] = heap.delete_minimum(_lowercase , _lowercase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
UpperCAmelCase_ : Optional[int] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowercase )]
):
UpperCAmelCase_ : int = distance
heap.bottom_to_top(
_lowercase , heap.get_position(_lowercase ) , _lowercase , _lowercase )
UpperCAmelCase_ : Tuple = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__a = int(input('Enter number of edges: ').strip())
__a = defaultdict(list)
for _ in range(edges_number):
__a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
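# Worked example (added comment): for the triangle graph with edges
#   0-1 weight 1, 1-2 weight 2, 0-2 weight 4
# the algorithm above returns the MST edges [(0, 1), (1, 2)] with total weight 3.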
| 235
|
from collections import defaultdict
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = first_str.lower().strip()
UpperCAmelCase_ : Any = second_str.lower().strip()
# Remove whitespace
UpperCAmelCase_ : Any = first_str.replace(''' ''' , '''''' )
UpperCAmelCase_ : int = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(_lowercase ) != len(_lowercase ):
return False
# Default values for count should be 0
UpperCAmelCase_ : defaultdict[str, int] = defaultdict(_lowercase )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(_lowercase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__a = input('Enter the first string ').strip()
__a = input('Enter the second string ').strip()
__a = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 235
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase__ = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
lowerCamelCase__ = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
lowerCamelCase__ = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
lowerCamelCase__ = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 86
|
def _a ( lowerCamelCase ):
return " ".join(
"""""".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287
| 0
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCAmelCase ( self ) -> Any:
_A , _A = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=lowerCAmelCase_ , dtype=jnp.bfloataa )
_A , _A = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=lowerCAmelCase_ , from_pt=lowerCAmelCase_ , dtype=jnp.bfloataa )
_A = controlnet_params
_A = """bird"""
_A = jax.device_count()
_A = pipe.prepare_text_inputs([prompts] * num_samples )
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
_A = pipe.prepare_image_inputs([canny_image] * num_samples )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(lowerCAmelCase_ , jax.device_count() )
_A = replicate(lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = pipe(
prompt_ids=lowerCAmelCase_ , image=lowerCAmelCase_ , params=lowerCAmelCase_ , prng_seed=lowerCAmelCase_ , num_inference_steps=50 , jit=lowerCAmelCase_ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 2_53:2_56, 2_53:2_56, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self ) -> Dict:
_A , _A = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=lowerCAmelCase_ , dtype=jnp.bfloataa )
_A , _A = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=lowerCAmelCase_ , from_pt=lowerCAmelCase_ , dtype=jnp.bfloataa )
_A = controlnet_params
_A = """Chef in the kitchen"""
_A = jax.device_count()
_A = pipe.prepare_text_inputs([prompts] * num_samples )
_A = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
_A = pipe.prepare_image_inputs([pose_image] * num_samples )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(lowerCAmelCase_ , jax.device_count() )
_A = replicate(lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = shard(lowerCAmelCase_ )
_A = pipe(
prompt_ids=lowerCAmelCase_ , image=lowerCAmelCase_ , params=lowerCAmelCase_ , prng_seed=lowerCAmelCase_ , num_inference_steps=50 , jit=lowerCAmelCase_ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 2_53:2_56, 2_53:2_56, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 370
|
from collections.abc import Sequence
def snake_case ( snake_case__ :Sequence[float] , snake_case__ :bool = False) -> float:
if not arr:
return 0
_A = 0 if allow_empty_subarrays else float("""-inf""")
_A = 0.0
for num in arr:
_A = max(0 if allow_empty_subarrays else num , curr_sum + num)
_A = max(snake_case__ , snake_case__)
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
_SCREAMING_SNAKE_CASE = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
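# --- Illustrative sketch (added; not part of the function above) ---
# The allow_empty_subarrays flag only matters for all-negative input: with it the
# empty subarray (sum 0) is allowed, without it the best single element wins.
assert max_subarray_sum([-3, -1, -2]) == -1
assert max_subarray_sum([-3, -1, -2], True) == 0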
| 81
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = super().to_dict()
for k, v in d.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = v.to_dict()
return d
| 316
|
"""simple docstring"""
def A ( snake_case :list[int] , snake_case :int ) -> bool:
__UpperCamelCase = len(snake_case )
__UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__UpperCamelCase = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__UpperCamelCase = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__UpperCamelCase = subset[i - 1][j]
if arr[i - 1] <= j:
__UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
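# --- Illustrative sketch (added; not part of the solution above) ---
# Worked example of the DP table: for arr = [3, 34, 4, 12, 5, 2],
# a subset summing to 9 exists (4 + 5) but none sums to 30.
assert A([3, 34, 4, 12, 5, 2], 9)
assert not A([3, 34, 4, 12, 5, 2], 30)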
| 316
| 1
|
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = 0
lowercase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
for i in range(n - 1 ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) <= 1:
return arr, 0
lowercase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) // 2
lowercase__ : Union[str, Any] = arr[0:mid]
lowercase__ : List[Any] = arr[mid:]
lowercase__ , lowercase__ : Optional[int] = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
lowercase__ , lowercase__ : int = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
lowercase__ , lowercase__ : Dict = _count_cross_inversions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ : Any = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[Any] = []
lowercase__ : Dict = 0
while i < len(SCREAMING_SNAKE_CASE_ ) and j < len(SCREAMING_SNAKE_CASE_ ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all k with i <= k < len(P).
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(SCREAMING_SNAKE_CASE_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(SCREAMING_SNAKE_CASE_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
lowercase__ : Optional[Any] = count_inversions_bf(SCREAMING_SNAKE_CASE_ )
lowercase__ , lowercase__ : Optional[int] = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , SCREAMING_SNAKE_CASE_ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
lowercase__ : Union[str, Any] = count_inversions_bf(SCREAMING_SNAKE_CASE_ )
lowercase__ , lowercase__ : Optional[Any] = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , SCREAMING_SNAKE_CASE_ )
# an empty list should also have zero inversions
lowercase__ : Optional[int] = []
lowercase__ : List[str] = count_inversions_bf(SCREAMING_SNAKE_CASE_ )
lowercase__ , lowercase__ : Optional[Any] = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
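# --- Illustrative sketch (added; not part of the module above) ---
# Sanity check on a fresh array: [3, 1, 2] has exactly two inversions,
# (3, 1) and (3, 2); the recursive counter returns (sorted_array, count).
assert count_inversions_bf([3, 1, 2]) == 2
assert count_inversions_recursive([3, 1, 2])[1] == 2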
| 364
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[Any] = {}
lowercase__ : Tuple = tokenizer(example["content"] , truncation=lowerCamelCase__ )["input_ids"]
lowercase__ : Optional[int] = len(example["content"] ) / len(output["input_ids"] )
return output
lowerCAmelCase__ = HfArgumentParser(PretokenizationArguments)
lowerCAmelCase__ = parser.parse_args()
if args.num_workers is None:
lowerCAmelCase__ = multiprocessing.cpu_count()
lowerCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
lowerCAmelCase__ = time.time()
lowerCAmelCase__ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
lowerCAmelCase__ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 121
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : Dict = '''decision_transformer'''
A__ : int = ['''past_key_values''']
A__ : List[Any] = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[Any] , _snake_case : Dict=17 , _snake_case : Any=4 , _snake_case : Optional[Any]=128 , _snake_case : List[Any]=4096 , _snake_case : Optional[int]=True , _snake_case : List[str]=1 , _snake_case : Optional[int]=1024 , _snake_case : Optional[Any]=3 , _snake_case : Tuple=1 , _snake_case : Tuple=None , _snake_case : int="relu" , _snake_case : Tuple=0.1 , _snake_case : Tuple=0.1 , _snake_case : str=0.1 , _snake_case : Dict=1E-5 , _snake_case : int=0.02 , _snake_case : int=True , _snake_case : Any=True , _snake_case : int=5_0256 , _snake_case : Dict=5_0256 , _snake_case : str=False , _snake_case : Any=False , **_snake_case : List[str] , ):
__lowercase : Optional[int] = state_dim
__lowercase : int = act_dim
__lowercase : Tuple = hidden_size
__lowercase : Optional[Any] = max_ep_len
__lowercase : Dict = action_tanh
__lowercase : Union[str, Any] = vocab_size
__lowercase : Optional[Any] = n_positions
__lowercase : List[Any] = n_layer
__lowercase : Optional[Any] = n_head
__lowercase : List[Any] = n_inner
__lowercase : Optional[int] = activation_function
__lowercase : Optional[Any] = resid_pdrop
__lowercase : List[str] = embd_pdrop
__lowercase : Any = attn_pdrop
__lowercase : Optional[Any] = layer_norm_epsilon
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scale_attn_weights
__lowercase : Optional[Any] = use_cache
__lowercase : str = scale_attn_by_inverse_layer_idx
__lowercase : Union[str, Any] = reorder_and_upcast_attn
__lowercase : Optional[Any] = bos_token_id
__lowercase : List[Any] = eos_token_id
super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
| 156
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
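# Worked example (added comment): a word box (left=10, top=20, right=110, bottom=220)
# on a 1000x1000 pixel image stays [10, 20, 110, 220] on the 0-1000 normalised scale;
# on a 500x2000 image it would become [20, 10, 220, 110].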
def apply_tesseract(image, lang, tesseract_config=None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
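
# Editor's note: a quick worked example of `normalize_box` (hedged addition).
# For a 200x100 image, the box [10, 20, 30, 40] maps onto the 0-1000 scale as
# [1000*10/200, 1000*20/100, 1000*30/200, 1000*40/100] == [50, 200, 150, 400].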
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    r"""
    Image processor for document images: optionally resizes the input and, when
    `apply_ocr` is set, runs Tesseract to extract words and normalized bounding boxes.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
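
# --- Editor's usage sketch (hedged addition, not part of the original module) ---
# Runs the processor end-to-end on a blank test image; requires Pillow and, when
# `apply_ocr=True`, a local Tesseract install for pytesseract.
if __name__ == "__main__":
    image = PIL.Image.new("RGB", (400, 300), color="white")
    processor = LayoutLMv2ImageProcessor(apply_ocr=False)  # skip OCR so Tesseract isn't required
    encoding = processor(image, return_tensors="np")
    print(encoding["pixel_values"].shape)  # (1, 3, 224, 224)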
| 156
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast NLLB tokenizer (backed by HuggingFace's *tokenizers* library), with
    source/target language handling for translation.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
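
# --- Editor's usage sketch (hedged addition, not part of the original module) ---
# Typical translation tokenization with the class above; the checkpoint name is the
# one referenced in PRETRAINED_VOCAB_FILES_MAP and downloads weights on first use.
if __name__ == "__main__":
    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    inputs = tokenizer("Hello world", return_tensors="pt")
    print(inputs["input_ids"])  # starts with the eng_Latn language-code token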
| 364
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
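
# Editor's note (hedged): the `sys.modules[__name__] = _LazyModule(...)` line swaps this
# package's module object for a lazy proxy, so an import such as
#
#     from transformers.models.groupvit import GroupViTModel
#
# only pays the torch/TensorFlow import cost when that attribute is first accessed.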
| 0
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """
    Returns the optimal score reachable from `node_index` at `depth` in a complete
    binary game tree of the given `height`, with the maximizing player to move
    whenever `is_max` is True.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
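
# --- Editor's worked example (hedged addition, for clarity) ---
# For the demo scores above, the depth-3 tree evaluates bottom-up:
#   depth 2 (max): max(90, 23)=90   max(6, 33)=33   max(21, 65)=65   max(123, 34423)=34423
#   depth 1 (min): min(90, 33)=33   min(65, 34423)=65
#   depth 0 (max): max(33, 65)=65   -> "Optimal value : 65"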
| 37
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        # train and eval datasets have the same length in these tests
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
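
# --- Editor's sketch (hedged addition): the hooks exercised above in a "real" callback.
# `control.should_training_stop` is the standard TrainerControl flag for early exit;
# the step budget here is an illustrative parameter, not part of the original tests.
class StopAfterNStepsCallback(TrainerCallback):
    def __init__(self, max_steps=100):
        self.max_steps = max_steps

    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step >= self.max_steps:
            control.should_training_stop = True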
| 31
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
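
# --- Editor's usage sketch (hedged addition): audio-text matching with CLAP ---
# `laion/clap-htsat-unfused` is a commonly used public checkpoint (an assumption, not
# stated in this file); the processor expects raw audio at its native sampling rate.
#
#     from transformers import ClapModel, ClapProcessor
#
#     model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(text=["a dog barking"], audios=[audio_array], sampling_rate=48_000, return_tensors="pt")
#     probs = model(**inputs).logits_per_audio.softmax(dim=-1)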
| 278
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Logs the metrics and saves them as `{split}_results.json` in `output_dir`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
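
# --- Editor's usage sketch (hedged addition): a typical invocation of this script ---
# Flag names come from the dataclasses above; the script filename and the data-dir
# layout (train.source / train.target etc.) are assumptions based on the
# accompanying Seq2SeqDataset helper, not stated in this file.
#
#     python finetune_trainer.py \
#         --model_name_or_path facebook/bart-base \
#         --data_dir ./xsum \
#         --output_dir ./outputs --do_train --do_eval \
#         --predict_with_generate --n_val 500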
| 278
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 144
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
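
# --- Editor's usage sketch (hedged addition): the pipeline outside the test harness ---
# Mirrors the slow tests above; `openai/diffusers-cd_imagenet64_l2` is an assumed
# public distillation checkpoint name, and one-step sampling works the same way as in
# test_consistency_model_cd_onestep.
#
#     pipe = ConsistencyModelPipeline.from_pretrained("openai/diffusers-cd_imagenet64_l2")
#     pipe.to("cuda")
#     image = pipe(num_inference_steps=1, timesteps=None).images[0]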
| 191
| 0
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
__SCREAMING_SNAKE_CASE = self.diffusers_dir
shutil.copy(
os.path.join(UpperCAmelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def UpperCAmelCase_ ( self : Tuple ) -> str:
__SCREAMING_SNAKE_CASE = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int]=None ) -> List[str]:
__SCREAMING_SNAKE_CASE = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
__SCREAMING_SNAKE_CASE = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
__SCREAMING_SNAKE_CASE = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
__SCREAMING_SNAKE_CASE = black.format_str(UpperCAmelCase__ , mode=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = os.path.join(self.diffusers_dir , "new_code.py" )
with open(UpperCAmelCase__ , "w" , newline="\n" ) as f:
f.write(UpperCAmelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCAmelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCAmelCase__ )
with open(UpperCAmelCase__ , "r" ) as f:
self.assertTrue(f.read() , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , UpperCAmelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , UpperCAmelCase__ ) , )
# Copy consistency with a really long name
__SCREAMING_SNAKE_CASE = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , UpperCAmelCase__ , UpperCAmelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , UpperCAmelCase__ , overwrite_result=re.sub("DDPM" , "Test" , UpperCAmelCase__ ) , )
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
a__ : Optional[Any] = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(s_dict.keys() )
for key in keys:
__SCREAMING_SNAKE_CASE = R".*/layers_(\d+)"
__SCREAMING_SNAKE_CASE = key
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = re.match(lowerCAmelCase_ , lowerCAmelCase_ ).groups()
if groups[0] == "encoder":
__SCREAMING_SNAKE_CASE = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase_ )
elif groups[0] == "decoder":
__SCREAMING_SNAKE_CASE = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""{key} -> {new_key}""" )
__SCREAMING_SNAKE_CASE = s_dict.pop(lowerCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__SCREAMING_SNAKE_CASE = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__SCREAMING_SNAKE_CASE = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
__SCREAMING_SNAKE_CASE = s_dict[key].shape[0]
__SCREAMING_SNAKE_CASE = s_dict[key]
for idx in range(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = expert_weights[idx]
print(f"""{key} -> {key.replace('expert/' , f'experts/expert_{idx}/' )}""" )
s_dict.pop(lowerCAmelCase_ )
return s_dict
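# A self-contained sketch of the two regex rewrites applied above, traced on a
# hypothetical encoder key (not taken from a real checkpoint):
import re

demo_key = "encoder/layers_0/mlp/wi/kernel"
demo_key = re.sub(R"layers_(\d+)", R"block/\1/layer", demo_key)  # layers_N -> block/N/layer
demo_key = re.sub(R"/mlp/", R"/1/mlp/", demo_key)  # encoder MLP lives in sub-layer 1
print(demo_key)  # encoder/block/0/layer/1/mlp/wi/kernel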
a__ : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
import regex as re
with open(lowerCAmelCase_ , "r" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__SCREAMING_SNAKE_CASE = float(lowerCAmelCase_ ) if "." in value else int(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase_ )[0]
__SCREAMING_SNAKE_CASE = str(activation[1] )
__SCREAMING_SNAKE_CASE = num_experts
__SCREAMING_SNAKE_CASE = SwitchTransformersConfig(**lowerCAmelCase_ )
return config
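# A self-contained sketch of the gin parsing above on a hypothetical two-line gin
# snippet; the parameter names mirror the mapping used when building the config.
import re

demo_gin = "NUM_HEADS = 12\ndense.MlpBlock.activations = ('gelu',)\n"
demo_pairs = re.findall(R"(.*) = ([0-9.]*)", demo_gin)  # [('NUM_HEADS', '12'), ...]
demo_activation = re.findall(R"(.*activations) = \(\'(.*)\',\)", demo_gin)[0]
print(demo_pairs[0], demo_activation[1])  # ('NUM_HEADS', '12') gelu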
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="./" , lowerCAmelCase_=8 ):
'''simple docstring'''
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
__SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
if gin_file is not None:
__SCREAMING_SNAKE_CASE = convert_gin_to_config(lowerCAmelCase_ , lowerCAmelCase_ )
else:
__SCREAMING_SNAKE_CASE = SwitchTransformersConfig.from_pretrained(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = SwitchTransformersForConditionalGeneration(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = flax_params["target"]
__SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ , sep="/" )
__SCREAMING_SNAKE_CASE = rename_keys(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = unflatten_dict(lowerCAmelCase_ , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the pre-trained T5X SwitchTransformers checkpoint to convert. \nIf no `gin_file` is'''
''' provided, a `config_name` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
a__ : Tuple = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_a : Any = VQModel
_a : Tuple = 'sample'
@property
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=(32, 32) ) -> Any:
"""simple docstring"""
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE )
return {"sample": image}
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
return (3, 32, 32)
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = VQModel.from_pretrained('fusing/vqgan-dummy' )
model.to(_SCREAMING_SNAKE_CASE ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(_SCREAMING_SNAKE_CASE )
with torch.no_grad():
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__)
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=a__ , default='wikitext' , help='Name of the training dataset. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
_UpperCAmelCase = parser.parse_args()
return args
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
def fn(a__: str ):
return tokenizer(examples['text'] )
return fn
def lowerCAmelCase__ ( a__: List[str] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(tokenized_data['input_ids'] ) ):
_UpperCAmelCase = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
_UpperCAmelCase = tf.train.Features(feature=a__ )
_UpperCAmelCase = tf.train.Example(features=a__ )
_UpperCAmelCase = example.SerializeToString()
records.append(a__ )
return records
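# A minimal sketch of reading one serialized record back, to verify the feature
# layout produced above; `serialized` stands in for any element of `records`, and
# tf.train.Example.FromString is the standard protobuf deserializer.
def decode_record(serialized):
    example = tf.train.Example.FromString(serialized)
    return {name: list(feature.int64_list.value) for name, feature in example.features.feature.items()}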
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
_UpperCAmelCase = min(len(a__ ) , args.limit )
_UpperCAmelCase = dataset.select(range(a__ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_UpperCAmelCase = os.path.join(args.output_dir , args.split )
if not os.path.exists(a__ ):
os.makedirs(a__ )
else:
_UpperCAmelCase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
_UpperCAmelCase = tokenize_function(a__ )
_UpperCAmelCase = dataset.map(a__ , batched=a__ , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length. (A standalone
# sketch of this chunking logic follows the script below.)
def group_texts(a__: Optional[int] ):
# Concatenate all texts.
_UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()}
_UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_UpperCAmelCase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_UpperCAmelCase = {
k: [t[i : i + args.max_length] for i in range(0 , a__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
_UpperCAmelCase = dataset_tokenized.map(a__ , batched=a__ , batch_size=1_0_0_0 , num_proc=4 )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for shard in range(0 , len(a__ ) , args.shard_size ):
_UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size]
_UpperCAmelCase = len(dataset_snapshot['input_ids'] )
_UpperCAmelCase = os.path.join(a__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
_UpperCAmelCase = get_serialized_examples(a__ )
with tf.io.TFRecordWriter(a__ ) as out_file:
for i in range(len(a__ ) ):
_UpperCAmelCase = serialized_examples[i]
out_file.write(a__ )
print('Wrote file {} containing {} records'.format(a__ , a__ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = parse_args()
main(args)
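# A standalone sketch of the concatenate-then-chunk logic implemented by
# group_texts above, on toy token lists (max_length=4 is illustrative):
def demo_chunk(batches, max_length=4):
    concatenated = sum(batches, [])
    total_length = (len(concatenated) // max_length) * max_length  # drop the remainder
    return [concatenated[i : i + max_length] for i in range(0, total_length, max_length)]

print(demo_chunk([[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]))  # [[1, 2, 3, 4], [5, 6, 7, 8]]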
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_A : int = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def _a ( UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
return max(metric_fn(UpperCAmelCase , gt ) for gt in ground_truths )
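# A minimal sketch of the max-over-ground-truths reduction above, using a plain
# exact-match metric; the strings are illustrative.
demo_exact = lambda prediction, gt: float(prediction == gt)
demo_score = max(demo_exact("paris", gt) for gt in ["Paris", "paris"])  # -> 1.0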
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ : List[Any] = [line.strip() for line in open(UpperCAmelCase , '''r''' ).readlines()]
lowerCamelCase__ : str = []
if args.gold_data_mode == "qa":
lowerCamelCase__ : int = pd.read_csv(UpperCAmelCase , sep='''\t''' , header=UpperCAmelCase )
for answer_list in data[1]:
lowerCamelCase__ : List[Any] = ast.literal_eval(UpperCAmelCase )
answers.append(UpperCAmelCase )
else:
lowerCamelCase__ : List[str] = [line.strip() for line in open(UpperCAmelCase , '''r''' ).readlines()]
lowerCamelCase__ : Union[str, Any] = [[reference] for reference in references]
lowerCamelCase__ : Optional[Any] = 0
for prediction, ground_truths in zip(UpperCAmelCase , UpperCAmelCase ):
total += 1
em += metric_max_over_ground_truths(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
fa += metric_max_over_ground_truths(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = 1_00.0 * em / total
lowerCamelCase__ : List[str] = 1_00.0 * fa / total
logger.info(f"F1: {fa:.2f}" )
logger.info(f"EM: {em:.2f}" )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = args.k
lowerCamelCase__ : Tuple = [line.strip() for line in open(UpperCAmelCase , '''r''' ).readlines()]
lowerCamelCase__ : int = [line.strip() for line in open(UpperCAmelCase , '''r''' ).readlines()]
lowerCamelCase__ : List[Any] = 0
for hypo, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : Tuple = set(hypo.split('''\t''' )[:k] )
lowerCamelCase__ : int = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
lowerCamelCase__ : Tuple = 1_00.0 * em / total
logger.info(f"Precision@{k}: {em: .2f}" )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
def strip_title(UpperCAmelCase ):
if title.startswith('''"''' ):
lowerCamelCase__ : Optional[int] = title[1:]
if title.endswith('''"''' ):
lowerCamelCase__ : List[str] = title[:-1]
return title
lowerCamelCase__ : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase , return_tensors='''pt''' , padding=UpperCAmelCase , truncation=UpperCAmelCase , )['''input_ids'''].to(args.device )
lowerCamelCase__ : str = rag_model.rag.question_encoder(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = question_enc_outputs[0]
lowerCamelCase__ : int = rag_model.retriever(
UpperCAmelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
lowerCamelCase__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
lowerCamelCase__ : Optional[int] = []
for docs in all_docs:
lowerCamelCase__ : List[Any] = [strip_title(UpperCAmelCase ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(UpperCAmelCase ) )
return provenance_strings
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
with torch.no_grad():
lowerCamelCase__ : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase , return_tensors='''pt''' , padding=UpperCAmelCase , truncation=UpperCAmelCase )
lowerCamelCase__ : str = inputs_dict.input_ids.to(args.device )
lowerCamelCase__ : int = inputs_dict.attention_mask.to(args.device )
lowerCamelCase__ : Optional[int] = rag_model.generate( # rag_model overwrites generate
UpperCAmelCase , attention_mask=UpperCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
lowerCamelCase__ : Dict = rag_model.retriever.generator_tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
if args.print_predictions:
for q, a in zip(UpperCAmelCase , UpperCAmelCase ):
logger.info('''Q: {} - A: {}'''.format(UpperCAmelCase , UpperCAmelCase ) )
return answers
def _a ( ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=UpperCAmelCase , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=UpperCAmelCase , choices=['''exact''', '''compressed''', '''legacy'''] , type=UpperCAmelCase , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=UpperCAmelCase , type=UpperCAmelCase , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=UpperCAmelCase , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=UpperCAmelCase , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=UpperCAmelCase , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=UpperCAmelCase , type=UpperCAmelCase , required=UpperCAmelCase , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=UpperCAmelCase , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file. '''
'''qa - a single line in the following format: question [tab] answer_list. '''
'''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=UpperCAmelCase , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=UpperCAmelCase , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=UpperCAmelCase , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=UpperCAmelCase , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=UpperCAmelCase , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
lowerCamelCase__ : Dict = parser.parse_args()
lowerCamelCase__ : Union[str, Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def _a ( UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : List[str] = {}
if args.model_type is None:
lowerCamelCase__ : Tuple = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
lowerCamelCase__ : Optional[Any] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
lowerCamelCase__ : Optional[int] = args.n_docs
if args.index_name is not None:
lowerCamelCase__ : Optional[Any] = args.index_name
if args.index_path is not None:
lowerCamelCase__ : int = args.index_path
else:
lowerCamelCase__ : List[str] = BartForConditionalGeneration
lowerCamelCase__ : Any = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , UpperCAmelCase )
lowerCamelCase__ : str = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
lowerCamelCase__ : List[Any] = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(UpperCAmelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(UpperCAmelCase ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
lowerCamelCase__ : str = RagRetriever.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = model_class.from_pretrained(UpperCAmelCase , retriever=UpperCAmelCase , **UpperCAmelCase )
model.retriever.init_retrieval()
else:
lowerCamelCase__ : Optional[Any] = model_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
lowerCamelCase__ : int = []
for line in tqdm(UpperCAmelCase ):
questions.append(line.strip() )
if len(UpperCAmelCase ) == args.eval_batch_size:
lowerCamelCase__ : str = evaluate_batch_fn(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
preds_file.write('''\n'''.join(UpperCAmelCase ) + '''\n''' )
preds_file.flush()
lowerCamelCase__ : List[Any] = []
if len(UpperCAmelCase ) > 0:
lowerCamelCase__ : Tuple = evaluate_batch_fn(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
preds_file.write('''\n'''.join(UpperCAmelCase ) )
preds_file.flush()
score_fn(UpperCAmelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_A : Dict = get_args()
main(args)
import os
from pathlib import Path
def _a ( ) -> Tuple:
"""simple docstring"""
from torch.utils.cpp_extension import load
lowerCamelCase__ : List[Any] = Path(UpperCAmelCase ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
lowerCamelCase__ : Any = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , UpperCAmelCase , with_cuda=UpperCAmelCase , extra_include_paths=[str(UpperCAmelCase )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
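# A hedged usage sketch for the loader above (named `_a` in this dump;
# `load_cuda_kernels` in upstream transformers): the extension is JIT-compiled on
# first call, so callers typically guard it and fall back to the pure-PyTorch
# attention path when no CUDA toolchain is available.
try:
    MSDA = _a()
except Exception:
    MSDA = None  # fall back to the native implementation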
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _a ( lowerCamelCase: dict ) -> tuple:
'''simple docstring'''
return (data["data"], data["target"])
def _a ( lowerCamelCase: np.ndarray , lowerCamelCase: np.ndarray , lowerCamelCase: np.ndarray ) -> np.ndarray:
'''simple docstring'''
__A = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCamelCase , lowerCamelCase )
# Predict target for test data
__A = xgb.predict(lowerCamelCase )
__A = predictions.reshape(len(lowerCamelCase ) , 1 )
return predictions
def _a ( ) -> None:
'''simple docstring'''
__A = fetch_california_housing()
__A , __A = data_handling(lowerCamelCase )
__A , __A , __A , __A = train_test_split(
lowerCamelCase , lowerCamelCase , test_size=0.25 , random_state=1 )
__A = xgboost(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Error printing
print(F"""Mean Absolute Error : {mean_absolute_error(lowerCamelCase , lowerCamelCase )}""" )
print(F"""Mean Square Error : {mean_squared_error(lowerCamelCase , lowerCamelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
snake_case__ : Union[str, Any] = 500000
snake_case__ , snake_case__ : Optional[Any] = os.path.split(__file__)
snake_case__ : List[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def _a ( lowerCamelCase: datasets.Dataset , **lowerCamelCase: Optional[int] ) -> str:
'''simple docstring'''
__A = dataset.map(**lowerCamelCase )
@get_duration
def _a ( lowerCamelCase: datasets.Dataset , **lowerCamelCase: Optional[int] ) -> str:
'''simple docstring'''
__A = dataset.filter(**lowerCamelCase )
def _a ( ) -> List[Any]:
'''simple docstring'''
__A = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
__A = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
__A = generate_example_dataset(
os.path.join(lowerCamelCase , '''dataset.arrow''' ) , lowerCamelCase , num_examples=lowerCamelCase )
__A = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase )
def tokenize(lowerCamelCase: List[str] ):
return tokenizer(examples['''text'''] )
__A = map(lowerCamelCase )
__A = map(lowerCamelCase , batched=lowerCamelCase )
__A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
with dataset.formatted_as(type='''numpy''' ):
__A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
with dataset.formatted_as(type='''pandas''' ):
__A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
__A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
__A = map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
__A = map(lowerCamelCase , function=lowerCamelCase , batched=lowerCamelCase )
__A = filter(lowerCamelCase )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__snake_case =logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : List[Any] ) -> List[Any]:
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = eval_examples
lowerCAmelCase = post_process_function
lowerCAmelCase = quant_trainer_args
lowerCAmelCase = 1_2_8 # default number of calibration samples
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[str]=None ) -> str:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires a calib_dataset.' )
lowerCAmelCase = calib_dataset if calib_dataset is not None else self.calib_dataset
lowerCAmelCase = self._remove_unused_columns(UpperCAmelCase__ , description='Calibration' )
return DataLoader(
UpperCAmelCase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCAmelCase__ , )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Dict=None ) -> Optional[int]:
lowerCAmelCase = self.train_dataset if calib_dataset is None else calib_dataset
lowerCAmelCase = self.get_calib_dataloader(UpperCAmelCase__ )
lowerCAmelCase = self.model
quant_trainer.configure_model(UpperCAmelCase__ , self.quant_trainer_args , calib=UpperCAmelCase__ )
model.eval()
quant_trainer.enable_calibration(UpperCAmelCase__ )
logger.info('***** Running calibration *****' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(UpperCAmelCase__ ):
# Prediction step
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.prediction_step(UpperCAmelCase__ , UpperCAmelCase__ , prediction_loss_only=UpperCAmelCase__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCAmelCase__ , self.quant_trainer_args )
lowerCAmelCase = model
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str = "eval" ) -> int:
lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase = self.get_eval_dataloader(UpperCAmelCase__ )
lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase = self.compute_metrics
lowerCAmelCase = None
lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase = eval_loop(
UpperCAmelCase__ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
lowerCAmelCase = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
lowerCAmelCase = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions )
lowerCAmelCase = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowerCAmelCase = metrics.pop(UpperCAmelCase__ )
self.log(UpperCAmelCase__ )
else:
lowerCAmelCase = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__ )
return metrics
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : str = "test" ) -> List[str]:
lowerCAmelCase = self.get_test_dataloader(UpperCAmelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase = self.compute_metrics
lowerCAmelCase = None
lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase = eval_loop(
UpperCAmelCase__ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
lowerCAmelCase = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions , 'predict' )
lowerCAmelCase = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowerCAmelCase = metrics.pop(UpperCAmelCase__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]="./" ) -> str:
lowerCAmelCase = self.eval_dataset
lowerCAmelCase = self.get_eval_dataloader(UpperCAmelCase__ )
lowerCAmelCase = next(iter(UpperCAmelCase__ ) )
# saving device - to make it consistent
lowerCAmelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
# convert to tuple
lowerCAmelCase = tuple(v.to(UpperCAmelCase__ ) for k, v in batch.items() )
logger.info('Converting model to be onnx compatible' )
from pytorch_quantization.nn import TensorQuantizer
lowerCAmelCase = True
lowerCAmelCase = self.model.to(UpperCAmelCase__ )
model.eval()
model.float()
lowerCAmelCase = model.module if hasattr(UpperCAmelCase__ , 'module' ) else model
quant_trainer.configure_model(UpperCAmelCase__ , self.quant_trainer_args )
lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'model.onnx' )
logger.info(F'''exporting model to {output_model_file}''' )
lowerCAmelCase = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , export_params=UpperCAmelCase__ , opset_version=1_3 , do_constant_folding=UpperCAmelCase__ , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=UpperCAmelCase__ , )
logger.info('onnx export finished' )
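# A minimal sketch (assuming onnxruntime is installed) of running the model
# exported above; the input names and int64 dtype follow the export call, while
# the token values themselves are illustrative.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx")
dummy = np.ones((1, 8), dtype=np.int64)
start_logits, end_logits = sess.run(
    ["output_start_logits", "output_end_logits"],
    {"input_ids": dummy, "attention_mask": dummy, "token_type_ids": dummy},
)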
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[str]=9_9 , UpperCAmelCase__ : Tuple=3_2 , UpperCAmelCase__ : str=5 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : Any=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Dict=5_1_2 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : List[str]=None , ) -> str:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def __UpperCAmelCase ( self : Any ) -> List[str]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase__ , )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> int:
lowerCAmelCase = FalconModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , ) -> Tuple:
lowerCAmelCase = True
lowerCAmelCase = FalconModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , ) -> List[str]:
lowerCAmelCase = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , ) -> List[str]:
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['hidden_states'][0]
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : Tuple = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase : Dict = (FalconForCausalLM,) if is_torch_available() else ()
lowerCamelCase : int = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : int = False
lowerCamelCase : Union[str, Any] = False
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
lowerCAmelCase = FalconModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Tuple:
lowerCAmelCase , *lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowerCAmelCase = alibi
self.model_tester.create_and_check_model(UpperCAmelCase__ , *UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Any ) -> Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Tuple ) -> int:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = FalconForCausalLM(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model._convert_to_rw_cache(result.past_key_values )
lowerCAmelCase = model._convert_cache_to_standard_format(UpperCAmelCase__ , UpperCAmelCase__ )
for layer in range(len(UpperCAmelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCAmelCase__ , 'use_cache' ):
return
lowerCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
if "use_cache" not in inputs:
lowerCAmelCase = True
lowerCAmelCase = model(**UpperCAmelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCAmelCase = (
getattr(UpperCAmelCase__ , 'decoder_layers' , UpperCAmelCase__ )
or getattr(UpperCAmelCase__ , 'num_decoder_layers' , UpperCAmelCase__ )
or config.num_hidden_layers
)
lowerCAmelCase = getattr(UpperCAmelCase__ , 'num_kv_heads' , config.num_attention_heads )
lowerCAmelCase = getattr(UpperCAmelCase__ , 'd_model' , config.hidden_size )
lowerCAmelCase = embed_dim // num_attention_heads
lowerCAmelCase = outputs['past_key_values']
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
lowerCAmelCase , lowerCAmelCase = inputs['input_ids'].shape
for i in range(UpperCAmelCase__ ):
if config.new_decoder_architecture:
lowerCAmelCase = config.num_attention_heads
elif config.multi_query:
lowerCAmelCase = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
lowerCAmelCase = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
lowerCAmelCase = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=1_9 )
lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(device=UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# Test results are the same with and without cache
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
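# A minimal sketch of the cache-equivalence property asserted by the last test:
# greedy decoding must yield identical tokens with and without the KV cache.
# `model` and `tokenizer` stand in for any causal-LM checkpoint loaded as above.
def demo_caches_agree(model, tokenizer, prompt="My favorite food is", n=20):
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        with_cache = model.generate(**inputs, do_sample=False, max_new_tokens=n, use_cache=True)
        without_cache = model.generate(**inputs, do_sample=False, max_new_tokens=n, use_cache=False)
    return torch.equal(with_cache, without_cache)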
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes: collect every prime below `limit`
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"""{solution() = }""")
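    # A quick sanity check, added for illustration (not part of the original solution):
    # for limit = 10 the sieve leaves primes {2, 3, 5, 7}, and Euler's product formula gives
    # phi(n) for n = 2..10 as [1, 2, 2, 4, 2, 6, 4, 6, 4], which sums to 31 reduced fractions.
    assert solution(10) == 31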
| 83
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
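# Illustrative invocation (assumed paths, shown only as an example): the launched training
# script must expose an `_mp_fn(index)` entry point, which `xmp.spawn` calls once per TPU core:
#
#   python xla_spawn.py --num_cores 8 path/to/run_glue.py --model_name_or_path bert-base-cased ...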
| 81
| 0
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 124
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
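# Worked shape example, added for illustration (not part of the original module): with the tiny
# defaults above (n_head=8, hidden_size=64), head_dim = 64 // 8 = 8. For a dummy batch of 2 and
# seq_length 5, past_key_values_length = 5 + 2 = 7, so each zero tensor built above is:
#
#   past_key:   torch.zeros(2 * 8, 8, 7)   # (batch * n_head, head_dim, past_len)
#   past_value: torch.zeros(2 * 8, 7, 8)   # (batch * n_head, past_len, head_dim)
#
# BLOOM keeps keys transposed relative to values, which is why `inverted_values_shape=True`
# is passed to fill_with_past_key_values_ in the `inputs` property.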
| 124
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 213
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
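# Usage sketch, added for illustration (assumes these filesystem classes are registered with
# fsspec, as the datasets library does at import time, and that "file.txt.gz" exists locally):
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::file://file.txt.gz") as f:
#       data = f.read()  # contents of the uncompressed "file.txt"
#
# The chained URL reads the inner "file.txt" through GzipFileSystem, which presents the
# compressed archive as a filesystem containing exactly one (uncompressed) file.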
| 279
| 0
|
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
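    # Worked example, added for illustration: for "aabaaab" the prefix function is
    # [0, 1, 0, 1, 2, 2, 3]; the last entry says the border "aab" of length 3 ends there,
    # which is also what longest_prefix returns.
    assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
    assert longest_prefix("aabaaab") == 3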
| 68
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
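# Usage sketch, added for illustration: a DisjunctiveConstraint is normally handed to
# `generate` via the `constraints=` argument so that constrained beam search must emit one
# of the alternative token sequences, e.g. (assuming `model` and `input_ids` exist):
#
#   constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   output = model.generate(input_ids, constraints=[constraint], num_beams=4)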
| 68
| 1
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 59
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
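# Illustrative note, added as an example of the `_LazyModule` pattern above: importing the
# package stays cheap because heavy torch/TF submodules are only loaded on first attribute
# access, e.g.
#
#   from transformers.models.groupvit import GroupViTConfig  # resolves configuration_groupvit lazily
#
# while the TYPE_CHECKING branch keeps the same names visible to static type checkers.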
| 0
| 0
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 352
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 94
| 0
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39,
        4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 )
    def test_full_tokenizer(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50_004, 50_001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])
    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50_003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50_001,
            },
        )
| 308
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 308
| 1
|
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
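    # Worked example, added for illustration: for -5, bin(-5)[3:] == "101" (3 bits),
    # abs(-5) - (1 << 3) == -3 whose bits are "11", and padding with the leading one
    # gives "1011", i.e. -5 in 4-bit two's complement.
    assert twos_complement(-5) == "0b1011"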
| 364
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
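# Minimal usage sketch, added for illustration: build a small weighted graph and read the
# MST edges off the parent map returned by prims_algo.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 3)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    dist, parent = prims_algo(g)
    print(parent)  # {1: None, 2: 1, 3: 2} - node 3 is reached through 2, not directly from 1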
| 210
| 0
|
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
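# Illustrative usage, added as an example (assumes numpy arrays of predictions and labels):
if __name__ == "__main__":
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(glue_compute_metrics("mrpc", preds, labels))  # acc 0.75, f1 0.8, acc_and_f1 0.775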
| 195
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size to `None` to disable attention slicing
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase = {}
if accepts_eta:
lowercase = eta
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase = self.scheduler.scale_model_input(snake_case , snake_case )
# predict the noise residual
lowercase = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase , lowercase = noise_pred.chunk(2 )
lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
lowercase = 1 / 0.18_215 * latents
lowercase = self.vae.decode(snake_case ).sample
lowercase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase = self.numpy_to_pil(snake_case )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
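# Standalone sketch (not part of the pipeline above): the guidance step mixes the two
# noise predictions as eps_uncond + w * (eps_text - eps_uncond), so w = 1 reduces to the
# conditional prediction and w > 1 amplifies prompt adherence. The tensors below are
# made-up example values, not pipeline state.
example_uncond = torch.zeros(1, 4, 8, 8)
example_text = torch.ones(1, 4, 8, 8)
example_scale = 7.5
example_guided = example_uncond + example_scale * (example_text - example_uncond)
assert torch.allclose(example_guided, torch.full((1, 4, 8, 8), example_scale))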
| 195
| 1
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 34
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    '''simple docstring'''
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
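# For example (hypothetical flags): parse_unknown_args(["--name", "squad", "--split", "train"])
# returns {"name": "squad", "split": "train"} by pairing each flag with the value after it.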
def main():
    '''simple docstring'''
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 34
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81
|
"""simple docstring"""
def triangle_number_generator():
    """simple docstring"""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2
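# Triangle numbers T(n) = n * (n + 1) // 2: 1, 3, 6, 10, 15, 21, 28, ...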
def count_divisors(n):
    """simple docstring"""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
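# Worked example: 28 = 2**2 * 7, so count_divisors(28) = (2 + 1) * (1 + 1) = 6
# (divisors 1, 2, 4, 7, 14, 28); the count multiplies (exponent + 1) over the prime factors.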
def solution():
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 81
| 1
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    '''simple docstring'''
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    '''simple docstring'''
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
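# Minimal usage sketch (hedged: `training_function` below is a stand-in, not defined
# in this module):
#
#     def training_function():
#         print("hello from a spawned process")
#
#     notebook_launcher(training_function, num_processes=1)
#
# The "fork" start method is what allows launching from an already-running notebook.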
| 323
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 1
|
"""simple docstring"""
def equation(x: float) -> float:
    '''simple docstring'''
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    '''simple docstring'''
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
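# The bracketing interval is halved every iteration, so the loop runs roughly
# log2((b - a) / 0.01) times, e.g. about 10 iterations for bisection(-2, 5).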
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 260
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    '''simple docstring'''
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
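# e.g. at the default 16 kHz sampling rate, max_length=20.0 keeps a random
# 320_000-sample window; clips already shorter than that are returned unchanged.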
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
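    # eval_pred.predictions has shape (num_examples, num_labels); the argmax over axis=1
    # turns logits into predicted class ids before the accuracy metric is computed.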
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 260
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
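    # e.g. with shortest_edge=18, a 30 (h) x 60 (w) image maps to 18 x 36: the short
    # side is pinned to shortest_edge and the aspect ratio is preserved.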
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
| 353
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    """simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
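        # e.g. image_size=30 and patch_size=2 give (30 // 2) ** 2 = 225 patches,
        # so seq_length = 226 once the [CLS] token is counted.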
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
def UpperCamelCase ( self: Tuple , UpperCamelCase: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = ViTModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = ViTForMaskedImageModeling(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A__ = 1
A__ = ViTForMaskedImageModeling(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase ( self: str , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[Any] ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = ViTForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = ViTForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = ViTModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def _snake_case ( ):
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(UpperCamelCase )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
A__ = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(UpperCamelCase )
A__ = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_80 )
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
A__ = inputs.pixel_values.to(UpperCamelCase )
# forward pass
with torch.no_grad():
A__ = model(UpperCamelCase , interpolate_pos_encoding=UpperCamelCase )
# verify the logits
A__ = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase )
A__ = torch.tensor(
[[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
A__ = inputs.pixel_values.to(UpperCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A__ = model(UpperCamelCase )
| 69
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = ShapEImgaImgPipeline
UpperCAmelCase__ : int = ["image"]
UpperCAmelCase__ : List[str] = ["image"]
UpperCAmelCase__ : int = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase__ : List[Any] = False
@property
def _a ( self ) -> Tuple:
return 32
@property
def _a ( self ) -> int:
return 32
@property
def _a ( self ) -> Optional[int]:
return self.time_input_dim * 4
@property
def _a ( self ) -> Any:
return 8
@property
def _a ( self ) -> int:
torch.manual_seed(0 )
__UpperCamelCase =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__UpperCamelCase =CLIPVisionModel(A_ )
return model
@property
def _a ( self ) -> List[Any]:
__UpperCamelCase =CLIPImageProcessor(
crop_size=224 , do_center_crop=A_ , do_normalize=A_ , do_resize=A_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
@property
def _a ( self ) -> str:
torch.manual_seed(0 )
__UpperCamelCase ={
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__UpperCamelCase =PriorTransformer(**A_ )
return model
@property
def _a ( self ) -> int:
torch.manual_seed(0 )
__UpperCamelCase ={
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__UpperCamelCase =ShapERenderer(**A_ )
return model
def _a ( self ) -> List[str]:
__UpperCamelCase =self.dummy_prior
__UpperCamelCase =self.dummy_image_encoder
__UpperCamelCase =self.dummy_image_processor
__UpperCamelCase =self.dummy_renderer
__UpperCamelCase =HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=A_ , clip_sample=A_ , clip_sample_range=1.0 , )
__UpperCamelCase ={
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def _a ( self , A_ , A_=0 ) -> Any:
__UpperCamelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
__UpperCamelCase =torch.manual_seed(A_ )
else:
__UpperCamelCase =torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase ={
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def _a ( self ) -> str:
__UpperCamelCase ='cpu'
__UpperCamelCase =self.get_dummy_components()
__UpperCamelCase =self.pipeline_class(**A_ )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase =output.images[0]
__UpperCamelCase =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__UpperCamelCase =np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> int:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> List[str]:
__UpperCamelCase =torch_device == 'cpu'
__UpperCamelCase =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=A_ , relax_max_difference=A_ , )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_dummy_components()
__UpperCamelCase =self.pipeline_class(**A_ )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =1
__UpperCamelCase =2
__UpperCamelCase =self.get_dummy_inputs(A_ )
for key in inputs.keys():
if key in self.batch_params:
__UpperCamelCase =batch_size * [inputs[key]]
__UpperCamelCase =pipe(**A_ , num_images_per_prompt=A_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> int:
__UpperCamelCase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__UpperCamelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__UpperCamelCase =ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =torch.Generator(device=A_ ).manual_seed(0 )
__UpperCamelCase =pipe(
A_ , generator=A_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(A_ , A_ )
| 62
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])
    @property
    def atol_for_validation(self) -> float:
        return 1E-4
    @property
    def default_onnx_opset(self) -> int:
        return 12
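# For reference, the input spec above describes a single NCHW image tensor;
# with this config's default image_size of [512, 864], a matching dummy
# export input would be shaped as below (a minimal sketch, assuming torch
# is available in the environment):
if __name__ == "__main__":
    import torch
    pixel_values = torch.zeros(1, 3, 512, 864)  # batch, num_channels, height, width
    print(pixel_values.shape)  # torch.Size([1, 3, 512, 864])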
| 62
| 1
|
"""simple docstring"""
from __future__ import annotations
def solution(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
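    # A minimal usage sketch of the backtracking solver above (the maze below
    # is a hypothetical example; 0 = open cell, 1 = blocked cell, start at
    # (0, 0), goal at (3, 3)):
    sample_maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    solution(sample_maze)  # prints the 0/1 path matrix and returns True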
| 369
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
    },
    """tokenizer_file""": {
        """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """camembert-base""": 512,
}
SPIECE_UNDERLINE = """▁"""
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
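# A quick standalone check of the special-token layout built above (the ids
# below are hypothetical stand-ins for <s>, </s> and real subword ids):
if __name__ == "__main__":
    cls_id, sep_id = [5], [6]
    tokens_a, tokens_b = [10, 11], [12]
    # single sequence: <s> A </s>; pair of sequences: <s> A </s></s> B </s>
    print(cls_id + tokens_a + sep_id)                               # [5, 10, 11, 6]
    print(cls_id + tokens_a + sep_id + sep_id + tokens_b + sep_id)  # [5, 10, 11, 6, 6, 12, 6]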
| 268
| 0
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCAmelCase__ = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
"""simple docstring"""
__lowerCamelCase = 'token-classification'
def __init__( self , lowercase ) -> List[str]:
'''simple docstring'''
if type(lowercase ) == dict:
A__ = Namespace(**lowercase )
A__ = import_module("tasks" )
try:
A__ = getattr(lowercase , hparams.task_type )
A__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
A__ = self.token_classification_task.get_labels(hparams.labels )
A__ = CrossEntropyLoss().ignore_index
super().__init__(lowercase , len(self.labels ) , self.mode )
def UpperCamelCase ( self , **lowercase ) -> Any:
'''simple docstring'''
return self.model(**lowercase )
def UpperCamelCase ( self , lowercase , lowercase ) -> int:
'''simple docstring'''
A__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
A__ = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
A__ = self(**lowercase )
A__ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.hparams
for mode in ["train", "dev", "test"]:
A__ = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase )
A__ = torch.load(lowercase )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
A__ = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase )
A__ = self.token_classification_task.convert_examples_to_features(
lowercase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase )
torch.save(lowercase , lowercase )
def UpperCamelCase ( self , lowercase , lowercase , lowercase = False ) -> DataLoader:
'''simple docstring'''
A__ = self._feature_file(lowercase )
logger.info("Loading features from cached file %s" , lowercase )
A__ = torch.load(lowercase )
A__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
A__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
A__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
A__ = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
A__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase )
def UpperCamelCase ( self , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
"""Compute validation""" ""
A__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
A__ = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
A__ = self(**lowercase )
A__ , A__ = outputs[:2]
A__ = logits.detach().cpu().numpy()
A__ = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase ( self , lowercase ) -> Optional[Any]:
'''simple docstring'''
A__ = torch.stack([x["val_loss"] for x in outputs] ).mean()
A__ = np.concatenate([x["pred"] for x in outputs] , axis=0 )
A__ = np.argmax(lowercase , axis=2 )
A__ = np.concatenate([x["target"] for x in outputs] , axis=0 )
A__ = dict(enumerate(self.labels ) )
A__ = [[] for _ in range(out_label_ids.shape[0] )]
A__ = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
A__ = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase , lowercase ),
"precision": precision_score(lowercase , lowercase ),
"recall": recall_score(lowercase , lowercase ),
"f1": fa_score(lowercase , lowercase ),
}
A__ = dict(results.items() )
A__ = results
return ret, preds_list, out_label_list
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
A__ , A__ , A__ = self._eval_end(lowercase )
A__ = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase ( self , lowercase ) -> str:
'''simple docstring'''
A__ , A__ , A__ = self._eval_end(lowercase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
A__ = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase ( lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=128 , type=lowercase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCAmelCase__ = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = NERTransformer(args)
lowerCAmelCase__ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCAmelCase__ = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
lowerCAmelCase__ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 68
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
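    # A quick numeric check of the bounded-sum / bounded-difference identities
    # noted above, in plain NumPy (independent of skfuzzy):
    mu_a = np.array([0.2, 0.9])
    mu_b = np.array([0.5, 0.8])
    print(np.minimum(1, mu_a + mu_b))  # bounded sum        -> [0.7 1. ]
    print(np.maximum(0, mu_a - mu_b))  # bounded difference -> [0.  0.1]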
| 65
| 0
|
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    """simple docstring"""
    print("""Making key files...""")
    make_key_files("""rsa""", 1024)
    print("""Key files generation successful.""")
def generate_key(key_size: int):
    """simple docstring"""
    print("""Generating prime p...""")
    p = rabinMiller.generate_large_prime(key_size)
    print("""Generating prime q...""")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q
    print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break
    print("""Calculating d that is mod inverse of e...""")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files(name: str, key_size: int) -> None:
    """simple docstring"""
    if os.path.exists(F'{name}_pubkey.txt') or os.path.exists(F'{name}_privkey.txt'):
        print("""\nWARNING:""")
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            """Use a different name or delete these files and re-run this program.""")
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(F'\nWriting public key to file {name}_pubkey.txt...')
    with open(F'{name}_pubkey.txt', """w""") as out_file:
        out_file.write(F'{key_size},{public_key[0]},{public_key[1]}')
    print(F'Writing private key to file {name}_privkey.txt...')
    with open(F'{name}_privkey.txt', """w""") as out_file:
        out_file.write(F'{key_size},{private_key[0]},{private_key[1]}')
if __name__ == "__main__":
main()
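    # A toy illustration (insecure, tiny numbers) of the key relation produced
    # by generate_key above: e is coprime to phi = (p - 1) * (q - 1), d is its
    # modular inverse, and encrypt-then-decrypt round-trips the message.
    p, q = 5, 11
    n, phi = p * q, (p - 1) * (q - 1)
    e, d = 3, 27  # 3 * 27 = 81 == 1 (mod 40)
    assert (e * d) % phi == 1
    assert pow(pow(42, e, n), d, n) == 42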
| 366
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''roberta'''
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ])
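# The dynamic axes chosen above in concrete form: default tasks export
# (batch, sequence) inputs, while multiple-choice adds a choice axis
# (a minimal sketch, assuming torch is available):
if __name__ == "__main__":
    import torch
    seq_ids = torch.zeros(2, 16, dtype=torch.long)    # batch, sequence
    mc_ids = torch.zeros(2, 4, 16, dtype=torch.long)  # batch, choice, sequence
    print(seq_ids.shape, mc_ids.shape)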
| 276
| 0
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """simple docstring"""
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""], truncation=False)["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""]) / len(output["""input_ids"""])
    return output
__lowerCamelCase : Dict = HfArgumentParser(PretokenizationArguments)
__lowerCamelCase : Union[str, Any] = parser.parse_args()
if args.num_workers is None:
__lowerCamelCase : Optional[Any] = multiprocessing.cpu_count()
__lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowerCamelCase : str = time.time()
__lowerCamelCase : Union[str, Any] = load_dataset(args.dataset_name, split='''train''')
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")
__lowerCamelCase : Dict = time.time()
__lowerCamelCase : Tuple = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
__lowerCamelCase : Union[str, Any] = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 219
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( lowerCamelCase_ , unittest.TestCase ):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32
    @property
    def time_input_dim(self):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_a(self):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        """simple docstring"""
        return 100
    @property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model_kwargs = {
            """in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            """num_train_timesteps""": 1000,
            """beta_schedule""": """linear""",
            """beta_start""": 0.00085,
            """beta_end""": 0.012,
            """clip_sample""": False,
            """set_alpha_to_one""": False,
            """steps_offset""": 0,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("""RGB""").resize((256, 256))
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_img2img(self):
        """simple docstring"""
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        """simple docstring"""
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_img2img_frog.npy""")
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""")
        prompt = """A red cartoon frog, 4k"""
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder""", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="""cpu""").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="""""", ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 219
| 1
|
"""simple docstring"""
from __future__ import annotations
def depth_first_search(possible_board, diagonal_right_collisions, diagonal_left_collisions, boards, n) -> None:
    row = len(possible_board)
    # If row is equal to the size of the board, there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return
    # We iterate over each column in the row to find all valid placements
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain this column value; if it does, two queens collide vertically.
        # Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # and verify that neither result already exists in its respective set
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to
        # the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If all checks pass we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )
def n_queens_solution(n) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')
    print(len(boards), 'solutions were found.')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
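    # A concrete check of the two diagonal identities used in depth_first_search:
    # queens on the same 135º diagonal share row + col, and queens on the same
    # 45º diagonal share row - col.
    assert 1 + 3 == 3 + 1  # (row=1, col=3) and (row=3, col=1): same 135º diagonal
    assert 2 - 0 == 3 - 1  # (row=2, col=0) and (row=3, col=1): same 45º diagonal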
| 360
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = '''vit_msn'''
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 113
| 0
|
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=5_1_2,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def parse_bool(string) -> bool:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
__UpperCAmelCase =parser.parse_args()
__UpperCAmelCase =download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 67
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_bloom_fast"""] = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bloom"""] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
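# The machinery above defers heavy imports until an attribute is first
# accessed; a stripped-down, self-contained illustration of the same idea
# (the attribute-to-module mapping below is hypothetical):
import importlib
class _TinyLazyModule:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module  # e.g. {"BloomModel": "transformers"}
    def __getattr__(self, name):
        # Import the owning module only now, on first attribute access.
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)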
| 210
| 0
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor(ProcessorMixin):
    feature_extractor_class = 'Wav2Vec2FeatureExtractor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """, FutureWarning, )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""")
            audio = kwargs.pop("""raw_speech""")
        else:
            audio = kwargs.pop("""audio""", None)
        sampling_rate = kwargs.pop("""sampling_rate""", None)
        text = kwargs.pop("""text""", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("""input_features""", None)
        labels = kwargs.pop("""labels""", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
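# The as_target_processor pattern above is a plain context manager that swaps
# self.current_processor for the duration of the block; the same shape in
# miniature (a standalone sketch, not part of the processor API):
if __name__ == "__main__":
    class _Toy:
        def __init__(self):
            self.current = """feature_extractor"""
        @contextmanager
        def as_target(self):
            self.current = """tokenizer"""
            yield
            self.current = """feature_extractor"""
    toy = _Toy()
    with toy.as_target():
        print(toy.current)  # tokenizer
    print(toy.current)  # feature_extractor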
| 357
|
'''simple docstring'''
import functools
def min_edit_distance(worda: str, wordb: str) -> int:
    '''simple docstring'''
    len_worda = len(worda)
    len_wordb = len(wordb)
    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflown - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflown - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb), 1 + min_distance(indexa, indexb + 1), diff + min_distance(indexa + 1, indexb + 1), )
    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
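    # A small usage check of the memoized recursion above:
    # "kitten" -> "sitting" needs 3 edits (k->s, e->i, append g).
    print(min_edit_distance("kitten", "sitting"))  # 3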
| 8
| 0
|
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _a ( __a ):
__a : Any = (PNDMScheduler,)
__a : Any = (("""num_inference_steps""", 50),)
def A ( self : Dict , **lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowercase )
return config
def A ( self : Dict , lowercase : Dict=0 , **lowercase : str ):
'''simple docstring'''
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config(**lowercase )
UpperCAmelCase = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCAmelCase = scheduler_class.from_pretrained(lowercase )
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[:]
UpperCAmelCase = scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = new_scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase = scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = new_scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : Tuple ):
'''simple docstring'''
pass
def A ( self : Any , lowercase : Optional[Any]=0 , **lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase )
UpperCAmelCase = scheduler_class.from_pretrained(lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[:]
UpperCAmelCase = scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = new_scheduler.step_prk(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase = scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample
UpperCAmelCase = new_scheduler.step_plms(lowercase , lowercase , lowercase , **lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : Optional[Any] , **lowercase : Any ):
'''simple docstring'''
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**lowercase )
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = 10
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase = model(lowercase , lowercase )
UpperCAmelCase = scheduler.step_prk(lowercase , lowercase , lowercase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase = model(lowercase , lowercase )
UpperCAmelCase = scheduler.step_plms(lowercase , lowercase , lowercase ).prev_sample
return sample
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase , '''set_timesteps''' ):
scheduler.set_timesteps(lowercase )
elif num_inference_steps is not None and not hasattr(lowercase , '''set_timesteps''' ):
UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase = dummy_past_residuals[:]
UpperCAmelCase = scheduler.step_prk(lowercase , 0 , lowercase , **lowercase ).prev_sample
UpperCAmelCase = scheduler.step_prk(lowercase , 1 , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase = scheduler.step_plms(lowercase , 0 , lowercase , **lowercase ).prev_sample
UpperCAmelCase = scheduler.step_plms(lowercase , 1 , lowercase , **lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self : List[str] ):
'''simple docstring'''
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A ( self : Any ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase )
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase = scheduler_class(**lowercase )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def A ( self : Any ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def A ( self : List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase )
def A ( self : str ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def A ( self : Tuple ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase )
def A ( self : int ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase )
scheduler.set_timesteps(lowercase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase = scheduler.step_prk(lowercase , lowercase , lowercase ).prev_sample
def A ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(lowercase ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = self.full_loop()
UpperCAmelCase = torch.sum(torch.abs(lowercase ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
UpperCAmelCase = torch.sum(torch.abs(lowercase ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = self.full_loop(set_alpha_to_one=lowercase , beta_start=0.01 )
UpperCAmelCase = torch.sum(torch.abs(lowercase ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.full_loop(set_alpha_to_one=lowercase , beta_start=0.01 )
UpperCAmelCase = torch.sum(torch.abs(lowercase ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 34
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def snake_case_ ():
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCAmelCase = parser.parse_args()
return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, F"{split}_results.json")
    if os.path.exists(path):
        with open(path, '''r''') as f:
            return json.load(f)
    raise ValueError(F"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( __a ):
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_flax_glue.main()
UpperCAmelCase = get_results(lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(lowercase , '''argv''' , lowercase ):
run_clm_flax.main()
UpperCAmelCase = get_results(lowercase )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
        """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
        """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to reach the same score
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
        """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
"""Simulation of the BB84 quantum key distribution protocol using Qiskit."""
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to the rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to the rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    # Note: ``qiskit.Aer`` and ``qiskit.execute`` require a pre-1.0 Qiskit
    # with the Aer simulator installed.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # The most frequent measurement result (with one shot, the only one).
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extract the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        result_bit
        for alice_basis_bit, bob_basis_bit, result_bit in zip(
            alice_basis, bob_basis, result
        )
        if alice_basis_bit == bob_basis_bit
    )

    # Get the final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
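
# A minimal sifting demo (illustration, not part of the original module).
# The final sifting step above -- keeping only positions where Alice's and
# Bob's bases agree -- needs no quantum simulator to understand. The bit
# strings below are made-up stand-ins for real measurement results.
import numpy as np

rng = np.random.default_rng(0)
n = 48
alice_basis = rng.integers(2, size=n)
bob_basis = rng.integers(2, size=n)
measured_bits = rng.integers(2, size=n)

sifted = [
    str(bit)
    for a_bit, b_bit, bit in zip(alice_basis, bob_basis, measured_bits)
    if a_bit == b_bit
]
# On average half of the bases agree, so roughly n/2 bits survive sifting.
print(f"kept {len(sifted)}/{n} bits: {''.join(sifted)}")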
"""OpenAI GPT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
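
# Sketch of how ``attribute_map`` behaves (assumes a transformers release
# that exports this class as OpenAIGPTConfig; the values are arbitrary).
from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig(n_embd=256, n_layer=6)
# The generic HF names are aliases for the GPT-specific ones, so both
# spellings refer to the same underlying value.
assert config.hidden_size == config.n_embd == 256
assert config.num_hidden_layers == config.n_layer == 6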
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density at ``x``.

    >>> float(round(gaussian(0.0), 5))
    0.39894
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
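
# Numerical sanity check (illustration only; the tolerances are arbitrary):
# the density should integrate to ~1 and peak at the mean.
import numpy as np

xs = np.linspace(-10, 10, 10_001)
ys = np.array([gaussian(x, mu=1.0, sigma=2.0) for x in xs])

area = np.sum(ys) * (xs[1] - xs[0])  # Riemann sum; ~1 up to the truncated tails
peak_x = xs[np.argmax(ys)]           # should sit at mu
assert abs(area - 1.0) < 1e-4
assert abs(peak_x - 1.0) < 1e-2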
"""SegFormer model configuration."""
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
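
# Sketch (assumes transformers exports this class as SegformerConfig): the
# per-stage hyperparameter lists must stay aligned with num_encoder_blocks.
from transformers import SegformerConfig

config = SegformerConfig()  # defaults correspond to the MiT-b0 backbone
for name in ("depths", "sr_ratios", "hidden_sizes", "num_attention_heads"):
    values = getattr(config, name)
    assert len(values) == config.num_encoder_blocks, f"{name} is misaligned"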
"""Binary tree traversals, in recursive and iterative versions."""
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: the loop always returns before the queue empties")


def pre_order(node: TreeNode | None) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode | None) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode | None) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode | None) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode | None) -> None:
    # Prints each level of the tree on its own line.
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode | None) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n: TreeNode | None = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode | None) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n: TreeNode | None = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode | None) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
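
# Non-interactive sketch (illustration only): build_tree() above reads from
# stdin, so for quick testing wire up a small arbitrary tree by hand.
#
#         1
#        / \
#       2   3
#      / \
#     4   5
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left, root.left.right = TreeNode(4), TreeNode(5)

pre_order(root)    # 1,2,4,5,3,
print()
in_order(root)     # 4,2,5,1,3,
print()
level_order(root)  # 1,2,3,4,5,
print()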
"""Nagel--Schreckenberg cellular-automaton model of single-lane traffic."""
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in cells:
        if cell != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway: wrap around
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            distance = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], distance)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
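
# Driving sketch (parameter values are arbitrary): render a few update steps
# as ASCII, "." for an empty cell and a digit for each car's current speed.
highway = construct_highway(40, frequency=4, initial_speed=1)
history = simulate(highway, number_of_update=5, probability=0.1, max_speed=5)

for step, row in enumerate(history):
    line = "".join("." if cell == -1 else str(cell) for cell in row)
    print(f"t={step}: {line}")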
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given date using Conway's Doomsday rule."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is not a leap year if it is not divisible by 4, or if it is a
    # century year not divisible by 400 (the original condition had this
    # second test inverted, misclassifying years like 1900 and 2000).
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
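
# Cross-check against Python's calendar (the sample dates are arbitrary,
# chosen to exercise the century and leap-year cases).
from datetime import date

for y, m, d in [(2000, 1, 1), (1999, 12, 31), (2023, 3, 14), (2024, 2, 29)]:
    expected = date(y, m, d).strftime("%A")
    assert get_week_day(y, m, d) == expected, (y, m, d)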
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''▁'''
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCamelCase = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
__UpperCamelCase = {
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
__UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
def __init__( self, lowerCAmelCase__, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__ = None, lowerCAmelCase__=None, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case_ = legacy_behaviour
super().__init__(
bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, src_lang=lowerCAmelCase__, tgt_lang=lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCAmelCase__))
snake_case_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case_ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ = 1
snake_case_ = len(self.sp_model)
snake_case_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__)
}
snake_case_ = {v: k for k, v in self.lang_code_to_id.items()}
snake_case_ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case_ = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
snake_case_ = src_lang if src_lang is not None else 'eng_Latn'
snake_case_ = self.lang_code_to_id[self._src_lang]
snake_case_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self) -> Union[str, Any]:
snake_case_ = self.__dict__.copy()
snake_case_ = None
snake_case_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, lowerCAmelCase__) -> Tuple:
snake_case_ = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def a_ ( self) -> str:
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a_ ( self) -> str:
return self._src_lang
@src_lang.setter
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
snake_case_ = src_lang
snake_case_ = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = self.convert_tokens_to_ids(lowerCAmelCase__)
snake_case_ = tgt_lang_id
return inputs
def a_ ( self) -> List[Any]:
snake_case_ = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def a_ ( self, lowerCAmelCase__) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(lowerCAmelCase__)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a_ ( self, lowerCAmelCase__) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        # Join sub-word pieces and turn the SentencePiece underline ("▁") back into spaces.
        out_string = "".join(tokens).replace("▁", " ").strip()
        return out_string
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase__, 'wb') as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__)
return (out_vocab_file,)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = "eng_Latn", lowerCAmelCase__ = None, lowerCAmelCase__ = "fra_Latn", **lowerCAmelCase__, ) -> BatchEncoding:
snake_case_ = src_lang
snake_case_ = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
return self.set_src_lang_special_tokens(self.src_lang)
def a_ ( self) -> int:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
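
# Usage sketch (assumes the published facebook/nllb-200-distilled-600M
# checkpoint; running this needs network access to download it).
from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello, world!", return_tensors="pt")
# The source language code is placed around the sequence (position depends
# on legacy_behaviour) so the model knows which language it is reading.
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]))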
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : List[str]=None , **_lowercase : Optional[Any] ):
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
__UpperCAmelCase = model
__UpperCAmelCase = kwargs.get('''model_save_dir''' , _lowercase )
__UpperCAmelCase = kwargs.get('''latest_model_name''' , _lowercase )
    def __call__(self, **kwargs):
        # Convert every input to a NumPy array and run the ONNX session.
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
@staticmethod
def a ( _lowercase : Union[str, Path] , _lowercase : str=None , _lowercase : Dict=None ):
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
__UpperCAmelCase = '''CPUExecutionProvider'''
return ort.InferenceSession(_lowercase , providers=[provider] , sess_options=_lowercase )
def a ( self : str , _lowercase : Union[str, Path] , _lowercase : Optional[str] = None , **_lowercase : int ):
__UpperCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__UpperCAmelCase = self.model_save_dir.joinpath(self.latest_model_name )
__UpperCAmelCase = Path(_lowercase ).joinpath(_lowercase )
try:
shutil.copyfile(_lowercase , _lowercase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__UpperCAmelCase = self.model_save_dir.joinpath(_lowercase )
if src_path.exists():
__UpperCAmelCase = Path(_lowercase ).joinpath(_lowercase )
try:
shutil.copyfile(_lowercase , _lowercase )
except shutil.SameFileError:
pass
def a ( self : Optional[Any] , _lowercase : Union[str, os.PathLike] , **_lowercase : Optional[int] , ):
if os.path.isfile(_lowercase ):
logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(_lowercase , exist_ok=_lowercase )
# saving model weights/files
self._save_pretrained(_lowercase , **_lowercase )
@classmethod
def a ( cls : str , _lowercase : Union[str, Path] , _lowercase : Optional[Union[bool, str, None]] = None , _lowercase : Optional[Union[str, None]] = None , _lowercase : bool = False , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Optional["ort.SessionOptions"] = None , **_lowercase : List[Any] , ):
__UpperCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_lowercase ):
__UpperCAmelCase = OnnxRuntimeModel.load_model(
os.path.join(_lowercase , _lowercase ) , provider=_lowercase , sess_options=_lowercase )
__UpperCAmelCase = Path(_lowercase )
# load model from hub
else:
# download model
__UpperCAmelCase = hf_hub_download(
repo_id=_lowercase , filename=_lowercase , use_auth_token=_lowercase , revision=_lowercase , cache_dir=_lowercase , force_download=_lowercase , )
__UpperCAmelCase = Path(_lowercase ).parent
__UpperCAmelCase = Path(_lowercase ).name
__UpperCAmelCase = OnnxRuntimeModel.load_model(_lowercase , provider=_lowercase , sess_options=_lowercase )
return cls(model=_lowercase , **_lowercase )
@classmethod
def a ( cls : Union[str, Any] , _lowercase : Union[str, Path] , _lowercase : bool = True , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , **_lowercase : Any , ):
__UpperCAmelCase = None
if len(str(_lowercase ).split('''@''' ) ) == 2:
__UpperCAmelCase , __UpperCAmelCase = model_id.split('''@''' )
return cls._from_pretrained(
model_id=_lowercase , revision=_lowercase , cache_dir=_lowercase , force_download=_lowercase , use_auth_token=_lowercase , **_lowercase , )
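
# Usage sketch: the repo id and the input name below are hypothetical and
# depend entirely on how the ONNX graph was exported.
import numpy as np

model = OnnxRuntimeModel.from_pretrained(
    "some-user/my-onnx-model",  # hypothetical repo id
    provider="CPUExecutionProvider",
)
outputs = model(input_ids=np.ones((1, 77), dtype=np.int64))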
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _UpperCAmelCase :
def __init__( self : Optional[int] , _lowercase : Any , _lowercase : List[str]=14 , _lowercase : Dict=7 , _lowercase : Optional[int]=True , _lowercase : Optional[int]=True , _lowercase : Any=False , _lowercase : Any=True , _lowercase : List[str]=99 , _lowercase : int=32 , _lowercase : Union[str, Any]=4 , _lowercase : Dict=4 , _lowercase : List[Any]=4 , _lowercase : Dict=37 , _lowercase : Tuple="gelu" , _lowercase : Optional[int]=0.1 , _lowercase : Dict=0.1 , _lowercase : Union[str, Any]=5_12 , _lowercase : int=0.02 , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = rotary_dim
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = None
__UpperCAmelCase = vocab_size - 1
__UpperCAmelCase = vocab_size - 1
__UpperCAmelCase = vocab_size - 1
def a ( self : int ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_lowercase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def a ( self : str ):
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def a ( self : List[Any] , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : List[str] ):
__UpperCAmelCase = 20
__UpperCAmelCase = model_class_name(_lowercase )
__UpperCAmelCase = model.init_cache(input_ids.shape[0] , _lowercase )
__UpperCAmelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCAmelCase = model(
input_ids[:, :-1] , attention_mask=_lowercase , past_key_values=_lowercase , position_ids=_lowercase , )
__UpperCAmelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model(
input_ids[:, -1:] , attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , position_ids=_lowercase , )
__UpperCAmelCase = model(_lowercase )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def a ( self : List[Any] , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : Union[str, Any] ):
__UpperCAmelCase = 20
__UpperCAmelCase = model_class_name(_lowercase )
__UpperCAmelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCAmelCase = model.init_cache(input_ids.shape[0] , _lowercase )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCAmelCase = model(
input_ids[:, :-1] , attention_mask=_lowercase , past_key_values=_lowercase , position_ids=_lowercase , )
__UpperCAmelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_lowercase , position_ids=_lowercase , )
__UpperCAmelCase = model(_lowercase , attention_mask=_lowercase )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
a__ : List[Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def a ( self : List[Any] ):
__UpperCAmelCase = FlaxGPTJModelTester(self )
def a ( self : Any ):
for model_class_name in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase , _lowercase )
def a ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_lowercase , _lowercase , _lowercase , _lowercase )
@tooslow
def a ( self : Tuple ):
__UpperCAmelCase = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
__UpperCAmelCase = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=_lowercase , truncation=_lowercase )
__UpperCAmelCase = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
__UpperCAmelCase = False
__UpperCAmelCase = model.config.eos_token_id
__UpperCAmelCase = jax.jit(model.generate )
__UpperCAmelCase = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCAmelCase = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
__UpperCAmelCase = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(_lowercase , _lowercase )
@is_pt_flax_cross_test
def a ( self : Tuple ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase = getattr(_lowercase , _lowercase )
__UpperCAmelCase , __UpperCAmelCase = pt_inputs['''input_ids'''].shape
__UpperCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowercase ):
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = pt_model_class(_lowercase ).eval()
__UpperCAmelCase = model_class(_lowercase , dtype=jnp.floataa )
__UpperCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowercase )
__UpperCAmelCase = fx_state
with torch.no_grad():
__UpperCAmelCase = pt_model(**_lowercase ).to_tuple()
__UpperCAmelCase = fx_model(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowercase )
__UpperCAmelCase = model_class.from_pretrained(_lowercase , from_pt=_lowercase )
__UpperCAmelCase = fx_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(
len(_lowercase ) , len(_lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def a ( self : Any ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase = getattr(_lowercase , _lowercase )
__UpperCAmelCase = pt_model_class(_lowercase ).eval()
__UpperCAmelCase = model_class(_lowercase , dtype=jnp.floataa )
__UpperCAmelCase = load_flax_weights_in_pytorch_model(_lowercase , fx_model.params )
__UpperCAmelCase , __UpperCAmelCase = pt_inputs['''input_ids'''].shape
__UpperCAmelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_lowercase ):
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 0
__UpperCAmelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCAmelCase = pt_model(**_lowercase ).to_tuple()
__UpperCAmelCase = fx_model(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowercase )
__UpperCAmelCase = pt_model_class.from_pretrained(_lowercase , from_flax=_lowercase )
with torch.no_grad():
__UpperCAmelCase = pt_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(
len(_lowercase ) , len(_lowercase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(_lowercase , _lowercase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def a ( self : Tuple ):
for model_class_name in self.all_model_classes:
__UpperCAmelCase = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
__UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = AudioLDMPipeline
SCREAMING_SNAKE_CASE : str = TEXT_TO_AUDIO_PARAMS
SCREAMING_SNAKE_CASE : Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Dict = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def snake_case__( self : Dict ) ->str:
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(3_2, 6_4) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=_UpperCamelCase , )
snake_case_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
snake_case_ = ClapTextModelWithProjection(_UpperCamelCase )
snake_case_ = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=7_7 )
snake_case_ = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_UpperCamelCase , )
snake_case_ = SpeechTaHifiGan(_UpperCamelCase )
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def snake_case__( self : str , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any]=0 ) ->str:
if str(_UpperCamelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def snake_case__( self : int ) ->Optional[int]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = AudioLDMPipeline(**_UpperCamelCase )
snake_case_ = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = audioldm_pipe(**_UpperCamelCase )
snake_case_ = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) == 2_5_6
snake_case_ = audio[:1_0]
snake_case_ = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward pass with a plain string prompt
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward pass with pre-computed prompt embeddings
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward pass with string prompts
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward pass with pre-computed embeddings
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
def snake_case__( self : List[Any] ) ->Optional[int]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
snake_case_ = AudioLDMPipeline(**_UpperCamelCase )
snake_case_ = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = '''egg cracking'''
snake_case_ = audioldm_pipe(**_UpperCamelCase , negative_prompt=_UpperCamelCase )
snake_case_ = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) == 2_5_6
snake_case_ = audio[:1_0]
snake_case_ = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case__( self : List[Any] ) ->Union[str, Any]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
snake_case_ = AudioLDMPipeline(**_UpperCamelCase )
snake_case_ = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
snake_case_ = audioldm_pipe(_UpperCamelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
snake_case_ = 2
snake_case_ = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
snake_case_ = 2
snake_case_ = audioldm_pipe(_UpperCamelCase , num_inference_steps=2 , num_waveforms_per_prompt=_UpperCamelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
snake_case_ = 2
snake_case_ = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_UpperCamelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def snake_case__( self : Optional[int] ) ->List[str]:
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = AudioLDMPipeline(**_UpperCamelCase )
snake_case_ = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = audioldm_pipe.vocoder.config.sampling_rate
snake_case_ = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ = audioldm_pipe(audio_length_in_s=0.016 , **_UpperCamelCase )
snake_case_ = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) / vocoder_sampling_rate == 0.016
snake_case_ = audioldm_pipe(audio_length_in_s=0.032 , **_UpperCamelCase )
snake_case_ = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) / vocoder_sampling_rate == 0.032
def snake_case__( self : str ) ->List[Any]:
snake_case_ = self.get_dummy_components()
snake_case_ = AudioLDMPipeline(**_UpperCamelCase )
snake_case_ = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = ['''hey''']
snake_case_ = audioldm_pipe(_UpperCamelCase , num_inference_steps=1 )
snake_case_ = output.audios.shape
assert audio_shape == (1, 2_5_6)
snake_case_ = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
snake_case_ = SpeechTaHifiGan(_UpperCamelCase ).to(_UpperCamelCase )
snake_case_ = audioldm_pipe(_UpperCamelCase , num_inference_steps=1 )
snake_case_ = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def snake_case__( self : Optional[Any] ) ->int:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_UpperCamelCase )
def snake_case__( self : int ) ->Optional[int]:
self._test_inference_batch_single_identical(test_mean_pixel_difference=_UpperCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case__( self : Optional[Any] ) ->int:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCamelCase )
@slow
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Any ) ->str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__( self : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str="cpu" , _UpperCamelCase : List[Any]=torch.floataa , _UpperCamelCase : Any=0 ) ->List[str]:
snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ = np.random.RandomState(_UpperCamelCase ).standard_normal((1, 8, 1_2_8, 1_6) )
snake_case_ = torch.from_numpy(_UpperCamelCase ).to(device=_UpperCamelCase , dtype=_UpperCamelCase )
snake_case_ = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def snake_case__( self : Any ) ->Union[str, Any]:
snake_case_ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
snake_case_ = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_inputs(_UpperCamelCase )
snake_case_ = 2_5
snake_case_ = audioldm_pipe(**_UpperCamelCase ).audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) == 8_1_9_2_0
snake_case_ = audio[7_7_2_3_0:7_7_2_4_0]
snake_case_ = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
snake_case_ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def snake_case__( self : List[Any] ) ->str:
snake_case_ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
snake_case_ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
snake_case_ = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ = self.get_inputs(_UpperCamelCase )
snake_case_ = audioldm_pipe(**_UpperCamelCase ).audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) == 8_1_9_2_0
snake_case_ = audio[2_7_7_8_0:2_7_7_9_0]
snake_case_ = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
snake_case_ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
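# Usage sketch (added; assumes the matching `PyTorchBenchmark` runner from
# `transformers`, which consumes these arguments):
#
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#     args = PyTorchBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     results = PyTorchBenchmark(args).run()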
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ logging (assumed target of this assignment)
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex
    number constituted by this x-y-pair diverges. Members of the Mandelbrot
    set do not diverge so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """
    Black & white color-coding that ignores the relative distance. The
    Mandelbrot set is black, everything else is white.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """
    Color-coding taking the relative distance into account. The Mandelbrot set
    is black.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """
    Generate the image of the Mandelbrot set. Two types of coordinates are
    used: image-coordinates that refer to the pixels and figure-coordinates
    that refer to the complex numbers inside and outside the Mandelbrot set.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
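    # Quick sanity check (added, illustrative): the origin never escapes, so
    # get_distance(0, 0, 50) == 1.0, while a point far outside the set escapes
    # on the very first step, so get_distance(2, 2, 50) == 0.0.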
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    # Windows of len(qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
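# Illustrative example (added): with the rules above, a flattened parameter key
# such as ("transformer", "h", "0", "attention", "q_proj", "kernel") contains a
# window matching ("attention", "(q_proj|k_proj|v_proj)", "kernel"), so
# set_partitions assigns it P(None, "mp"): the output dimension of the
# projection kernel is sharded over the model-parallel mesh axis "mp".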
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm for finding a pattern within a piece of
    text, with complexity O(n + m).

    1) Preprocess the pattern to identify any suffixes that are identical to
       prefixes. This tells us where to continue from if we get a mismatch
       between a character in our pattern and the text.
    2) Step through the text one character at a time and compare it to a
       character in the pattern, updating our location within the pattern if
       necessary.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
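    # Worked trace (added, illustrative) for "aabaabaaa": each entry is the
    # length of the longest proper border (prefix == suffix) of the prefix
    # ending there, e.g. "aa" -> 1 ("a"), "aabaa" -> 2 ("aa"),
    # "aabaabaa" -> 5 ("aabaa"), and the full string -> 2 ("aa").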
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
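# Example (added, illustrative): with right padding,
# padding_tensor([[1, 2], [3]], -1, "right", 4)
# returns [[1, 2, -1, -1], [3, -1, -1, -1]].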
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads the inputs received, as well as the
    labels, NER tags and entity spans.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
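# Usage sketch (added; `tokenizer` is assumed to be a LUKE tokenizer instance):
#
#     collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#     batch = collator(features)  # pads labels, ner_tags and entity spans to a common length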
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
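# Sanity check (added, illustrative): for the 7B model, dim n = 4096 with the
# defaults ffn_dim_multiplier = 1 and multiple_of = 256 gives
# int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256 -> 11008,
# which matches INTERMEDIATE_SIZE_MAP["7B"] above.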
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        The compressed file system can be instantiated from any compressed file. It reads the contents of the
        compressed file as a filesystem with one file inside, as if it was an archive. The single file inside the
        filesystem is named after the compressed file, without the compression extension at the end of the filename.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of .xz (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of zstd file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
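# Usage sketch (added; these filesystem classes take effect once registered
# with fsspec, which the `datasets` library does on import):
#
#     import fsspec
#     with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz") as f:
#         text = f.read()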
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers
    that add up to the given target.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
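    # Note (added): the two-pointer scan relies on `nums` being sorted in
    # non-decreasing order; each iteration discards one end of the window, so
    # it runs in O(n) time and O(1) extra space.
    # e.g. two_pointer([2, 7, 11, 15], 9) -> [0, 1] since 2 + 7 == 9.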
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def __init__( self : Optional[Any] , A : AutoencoderKL , A : CLIPTextModel , A : CLIPTokenizer , A : UNetaDConditionModel , A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A : StableDiffusionSafetyChecker , A : CLIPImageProcessor , ) ->List[Any]:
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def __lowerCamelCase ( self : Any , A : Optional[Union[str, int]] = "auto" ) ->Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def __lowerCamelCase ( self : List[Any] ) ->List[Any]:
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self : Union[str, Any] , A : Union[str, List[str]] , A : int = 5_1_2 , A : int = 5_1_2 , A : int = 5_0 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : int , ) ->Tuple:
if isinstance(A , A ):
lowerCamelCase__ : str = 1
elif isinstance(A , A ):
lowerCamelCase__ : Dict = len(A )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(A )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(A )}." )
# get prompt text embeddings
lowerCamelCase__ : Optional[Any] = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
lowerCamelCase__ : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase__ : Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCamelCase__ : Any = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCamelCase__ : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = text_embeddings.shape
lowerCamelCase__ : int = text_embeddings.repeat(1 , A , 1 )
lowerCamelCase__ : str = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase__ : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase__ : List[str]
if negative_prompt is None:
lowerCamelCase__ : Optional[int] = ['''''']
elif type(A ) is not type(A ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(A )} !="
F" {type(A )}." )
elif isinstance(A , A ):
lowerCamelCase__ : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
lowerCamelCase__ : List[Any] = negative_prompt
lowerCamelCase__ : int = text_input_ids.shape[-1]
lowerCamelCase__ : Optional[int] = self.tokenizer(
A , padding='''max_length''' , max_length=A , truncation=A , return_tensors='''pt''' , )
lowerCamelCase__ : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase__ : Any = uncond_embeddings.shape[1]
lowerCamelCase__ : Union[str, Any] = uncond_embeddings.repeat(A , A , 1 )
lowerCamelCase__ : str = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ : str = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCamelCase__ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase__ : Dict = torch.randn(
A , generator=A , device='''cpu''' , dtype=A ).to(self.device )
lowerCamelCase__ : Dict = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
lowerCamelCase__ : Optional[Any] = torch.randn(
A , generator=A , device=self.device , dtype=A )
lowerCamelCase__ : Union[str, Any] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCamelCase__ : Optional[Any] = latents_reference.to(self.device )
lowerCamelCase__ : Optional[int] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowerCamelCase__ : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCamelCase__ : List[Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCamelCase__ : Dict = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCamelCase__ : str = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCamelCase__ : int = 0 if dx < 0 else dx
lowerCamelCase__ : Optional[int] = 0 if dy < 0 else dy
lowerCamelCase__ : Dict = max(-dx , 0 )
lowerCamelCase__ : int = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowerCamelCase__ : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase__ : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase__ : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase__ : Union[str, Any] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase__ : List[Any] = {}
if accepts_eta:
lowerCamelCase__ : Any = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ : Any = self.scheduler.scale_model_input(A , A )
# predict the noise residual
lowerCamelCase__ : Union[str, Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ : List[str] = noise_pred.chunk(2 )
lowerCamelCase__ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ : Dict = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
lowerCamelCase__ : Optional[Any] = 1 / 0.1_82_15 * latents
lowerCamelCase__ : int = self.vae.decode(A ).sample
lowerCamelCase__ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowerCamelCase__ : Tuple = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors='''pt''' ).to(
self.device )
lowerCamelCase__ , lowerCamelCase__ : int = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowerCamelCase__ : List[Any] = None
if output_type == "pil":
lowerCamelCase__ : Optional[Any] = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=64 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Dict = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : List[Any] = use_token_type_ids
_lowerCamelCase : int = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Any = embedding_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : str = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : Any = scope
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : int = None
if self.use_input_mask:
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = None
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Union[str, Any] = MobileBertModel(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[int] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
_lowerCamelCase : Union[str, Any] = model(lowercase , token_type_ids=lowercase )
_lowerCamelCase : List[str] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Union[str, Any] = MobileBertForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : List[Any] = MobileBertForNextSentencePrediction(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : List[str] = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : str = MobileBertForPreTraining(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : int = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Any = MobileBertForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : str = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = self.num_labels
_lowerCamelCase : List[str] = MobileBertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : str = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Optional[int] = MobileBertForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : List[str] = self.num_choices
_lowerCamelCase : Optional[int] = MobileBertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Tuple = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
def A_ ( self , lowercase , lowercase , lowercase=False ):
_lowerCamelCase : Optional[int] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class in get_values(lowercase ):
_lowerCamelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase )
_lowerCamelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
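    # Worked example of the ratio check above (hypothetical numbers): for an expected
    # activation of 1e8 and a computed value of 1.00005e8, expected / computed is
    # ~0.99995, which stays inside (1 - TOLERANCE, 1 + TOLERANCE) for TOLERANCE = 1e-3,
    # whereas an absolute-difference check would report a gap of ~5e3.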
| 96
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    """Read a big-endian unsigned 32-bit integer from a bytestream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
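# Note: the IDX files served at DEFAULT_SOURCE_URL store every header field (magic
# number, item count, row/column sizes) as big-endian uint32, hence the ">" byte order.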
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
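# Usage sketch (hypothetical local path; assumes a gzipped IDX image file):
#   with gfile.Open("/tmp/mnist/train-images-idx3-ubyte.gz", "rb") as f:
#       images = _extract_images(f)  # uint8 array of shape (60000, 28, 28, 1)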
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
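# Worked example: _dense_to_one_hot(numpy.array([1, 0]), num_classes=3) sets flat
# indices [0*3 + 1, 1*3 + 0] to 1, i.e. returns [[0., 1., 0.], [1., 0., 0.]].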
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
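    # Usage sketch: with 100 examples and batch_size=30, the fourth call crosses the
    # epoch boundary, reshuffles (when shuffle=True), and returns the 10 leftover
    # examples concatenated with the first 20 of the freshly shuffled epoch.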
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
    with gfile.GFile(filepath) as f:
        size = f.size()
    print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
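# Usage sketch (downloads ~11 MB from DEFAULT_SOURCE_URL on first call):
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   batch_images, batch_labels = mnist.train.next_batch(100)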
| 131
| 0
|
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict] )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
import requests
monkeypatch.setattr(__lowerCamelCase, "request", __lowerCamelCase )
UpperCAmelCase_ : Tuple = URL
if issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Union[str, Any] = url
elif issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = [url]
elif issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = {"train": url}
UpperCAmelCase_ : Union[str, Any] = "dummy"
UpperCAmelCase_ : Optional[Any] = "downloads"
UpperCAmelCase_ : Optional[int] = tmp_path
UpperCAmelCase_ : List[str] = DownloadConfig(
cache_dir=os.path.join(__lowerCamelCase, __lowerCamelCase ), use_etag=__lowerCamelCase, )
UpperCAmelCase_ : List[Any] = DownloadManager(dataset_name=__lowerCamelCase, download_config=__lowerCamelCase )
UpperCAmelCase_ : List[Any] = dl_manager.download(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : str = [downloaded_paths]
UpperCAmelCase_ : int = [urls]
elif isinstance(__lowerCamelCase, __lowerCamelCase ):
assert "train" in downloaded_paths.keys()
UpperCAmelCase_ : Tuple = downloaded_paths.values()
UpperCAmelCase_ : str = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCamelCase, __lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
UpperCAmelCase_ : List[str] = Path(__lowerCamelCase )
UpperCAmelCase_ : List[str] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
UpperCAmelCase_ : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
UpperCAmelCase_ : str = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
UpperCAmelCase_ : Dict = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
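# Resulting cache layout (sketch): <tmp_path>/downloads/<HASH> holds the downloaded
# body, and <tmp_path>/downloads/<HASH>.json holds the {"url": ..., "etag": ...} metadata
# asserted above.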
@pytest.mark.parametrize("paths_type", [str, list, dict] )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Dict = str(__lowerCamelCase )
if issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = filename
elif issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = [filename]
elif issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = {"train": filename}
UpperCAmelCase_ : Optional[int] = "dummy"
UpperCAmelCase_ : List[Any] = xz_file.parent
UpperCAmelCase_ : List[Any] = "extracted"
UpperCAmelCase_ : Tuple = DownloadConfig(
cache_dir=__lowerCamelCase, use_etag=__lowerCamelCase, )
UpperCAmelCase_ : Any = DownloadManager(dataset_name=__lowerCamelCase, download_config=__lowerCamelCase )
UpperCAmelCase_ : List[str] = dl_manager.extract(__lowerCamelCase )
UpperCAmelCase_ : Dict = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = [extracted_paths]
UpperCAmelCase_ : str = [paths]
elif isinstance(__lowerCamelCase, __lowerCamelCase ):
assert "train" in extracted_paths.keys()
UpperCAmelCase_ : str = extracted_paths.values()
UpperCAmelCase_ : Tuple = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCamelCase, __lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
UpperCAmelCase_ : Union[str, Any] = Path(__lowerCamelCase )
UpperCAmelCase_ : List[str] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCamelCase, etag=__lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
UpperCAmelCase_ : List[str] = extracted_path.read_text()
UpperCAmelCase_ : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"] )
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = request.getfixturevalue(__lowerCamelCase )
UpperCAmelCase_ : Dict = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ), start=1 ):
_test_jsonl(__lowerCamelCase, __lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = request.getfixturevalue(__lowerCamelCase )
UpperCAmelCase_ : int = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ), start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ), start=1 ):
_test_jsonl(__lowerCamelCase, __lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 23
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
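# Usage sketch: DetrConfig() keeps the default timm ResNet-50 backbone, while
# DetrConfig(use_timm_backbone=False) falls back to the HF ResNet config built above.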
| 23
| 1
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
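# Sketch: get_maskformer_config("maskformer-swin-tiny-ade") yields a config with 150
# labels, read from ade20k-id2label.json in the huggingface/label-files dataset repo.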
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
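# Note: the fused qkv weight has shape (3*dim, dim); rows [0, dim) are the query
# projection, [dim, 2*dim) the key projection, and [2*dim, 3*dim) the value projection,
# which is exactly how the slices above carve it up.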
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 98
|
"""simple docstring"""
def binary_and(a: int, b: int):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
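# Worked example: binary_and(25, 37) pads 11001 and 100101 to six bits and ANDs them
# bitwise, giving "0b000001"; binary_and(25, 32) gives "0b000000".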
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 56
|
"""simple docstring"""
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
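# Examples: is_power_of_two(8) -> True (0b1000 & 0b0111 == 0); is_power_of_two(6) -> False.
# Note that 0 also satisfies the bit trick, so is_power_of_two(0) returns True here.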
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56
| 1
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan):
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to network outputs range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
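    # Round-trip sketch: with clip=False the two affine maps are exact inverses, so
    # scale_to_features(scale_features(x)) == x; clipping only matters for values
    # outside [self.min_value, self.max_value].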
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 257
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
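# Sketch: for pixels reshaped to (N, 3) and a palette of clusters shaped (k, 3),
# color_quantize maps every pixel to the index of its nearest cluster, using the
# expansion ||a - b||^2 = ||a||^2 - 2ab + ||b||^2 computed in squared_euclidean_distance.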
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        # this resolves to the module-level `resize` imported from image_transforms
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 333
| 0
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 105
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
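# The try/except pattern above is the standard lazy-import guard in this
# codebase: when optional dependencies are missing, dummy placeholder objects
# (which raise an informative error at first use) are exported instead of the
# real classes.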
| 105
| 1
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash signature of a token list, or None if it is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
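# Usage sketch (hypothetical tokens): signatures of near-identical token sets
# give a Jaccard estimate close to 1.0; `jaccard` is the datasketch estimator.
#
#   mh1 = get_min_hash(["def", "foo", "return", "x"] * 10)
#   mh2 = get_min_hash(["def", "foo", "return", "y"] * 10)
#   if mh1 is not None and mh2 is not None:
#       print(mh1.jaccard(mh2))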
def get_tokens(code: str) -> Set[str]:
    """Split a code string on non-alphanumeric characters into a set of tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """MinHash-LSH index that tracks clusters of near-duplicate code samples."""

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the index and attach it to an existing cluster if it is a near duplicate."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element) -> Optional[Tuple[Tuple, MinHash]]:
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Build the duplication index over the whole dataset and read back the clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity of the token sets of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
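# For example, "a b c" and "b c d" tokenize to {a, b, c} and {b, c, d};
# the intersection {b, c} has 2 elements and the union {a, b, c, d} has 4,
# so jaccard_similarity returns 2 / 4 = 0.5.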
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near duplicates inside a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
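# Usage sketch: given a `datasets.Dataset` whose rows carry "content",
# "repo_name" and "path" columns (an assumed schema), the whole pipeline is
# driven by one call:
#
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)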
| 315
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer subclass that post-processes raw QA predictions before computing metrics."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
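# Usage sketch (names assumed): wire the trainer with a post-processing step
# that turns start/end logits into answer strings before metrics are computed.
#
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args,
#       train_dataset=train_features, eval_dataset=eval_features,
#       eval_examples=eval_examples,             # raw SQuAD-style examples
#       post_process_function=post_processing,   # (examples, features, predictions) -> EvalPrediction
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()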
| 315
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = StableDiffusionInstructPixaPixPipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A = IMAGE_TO_IMAGE_IMAGE_PARAMS
A = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __snake_case (self ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase_: Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, )
UpperCAmelCase_: Optional[Any] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
torch.manual_seed(0 )
UpperCAmelCase_: Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, )
torch.manual_seed(0 )
UpperCAmelCase_: Optional[int] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
UpperCAmelCase_: List[str] = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase_: Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> List[str]:
UpperCAmelCase_: str = floats_tensor((1, 3, 32, 32), rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = image.cpu().permute(0, 2, 3, 1 )[0]
UpperCAmelCase_: Optional[int] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert("""RGB""" )
if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ):
UpperCAmelCase_: int = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase_: List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Dict = self.get_dummy_components()
UpperCAmelCase_: Dict = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCAmelCase_: Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_: Tuple = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: List[Any] = self.get_dummy_components()
UpperCAmelCase_: Dict = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = """french fries"""
UpperCAmelCase_: Optional[Any] = sd_pipe(**SCREAMING_SNAKE_CASE_, negative_prompt=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = output.images
UpperCAmelCase_: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_: int = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: int = self.get_dummy_components()
UpperCAmelCase_: str = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = [inputs["""prompt"""]] * 2
UpperCAmelCase_: str = np.array(inputs["""image"""] ).astype(np.floataa ) / 2_5_5.0
UpperCAmelCase_: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = image / 2 + 0.5
UpperCAmelCase_: List[Any] = image.permute(0, 3, 1, 2 )
UpperCAmelCase_: Union[str, Any] = image.repeat(2, 1, 1, 1 )
UpperCAmelCase_: Tuple = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCAmelCase_: Tuple = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCAmelCase_: Optional[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Tuple = self.get_dummy_components()
UpperCAmelCase_: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule="""scaled_linear""" )
UpperCAmelCase_: Dict = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCAmelCase_: Optional[Any] = image[0, -3:, -3:, -1]
rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
print(",".join(str(x) for x in rounded_slice))
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_: List[str] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __snake_case (self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __snake_case (self ) -> str:
UpperCAmelCase_: Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_: Dict = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = VaeImageProcessor(do_resize=SCREAMING_SNAKE_CASE_, do_normalize=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = pipe(**self.get_dummy_inputs_by_type(SCREAMING_SNAKE_CASE_, input_image_type="""pt""" ) )[0]
UpperCAmelCase_: Union[str, Any] = components["""vae"""]
UpperCAmelCase_: str = self.get_dummy_inputs_by_type(SCREAMING_SNAKE_CASE_, input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCAmelCase_: Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCAmelCase_: Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE_ )[0]
UpperCAmelCase_: Tuple = np.abs(out - out_latents_inputs ).max()
self.assertLess(SCREAMING_SNAKE_CASE_, 1E-4, """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def __snake_case (self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case (self, SCREAMING_SNAKE_CASE_=0 ) -> List[Any]:
UpperCAmelCase_: Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
UpperCAmelCase_: Any = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def __snake_case (self ) -> Any:
UpperCAmelCase_: Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""", safety_checker=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
UpperCAmelCase_: str = self.get_inputs()
UpperCAmelCase_: str = pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCAmelCase_: List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_: int = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""", safety_checker=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Optional[int] = self.get_inputs()
UpperCAmelCase_: List[Any] = pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCAmelCase_: int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_: Dict = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __snake_case (self ) -> str:
UpperCAmelCase_: Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""", safety_checker=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Any = self.get_inputs()
UpperCAmelCase_: Tuple = pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCAmelCase_: List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_: List[Any] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __snake_case (self ) -> Dict:
UpperCAmelCase_: Any = 0
def callback_fn(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> None:
UpperCAmelCase_: str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_: int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_: Dict = latents[0, -3:, -3:, -1]
UpperCAmelCase_: int = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
UpperCAmelCase_: Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase_: int = latents[0, -3:, -3:, -1]
UpperCAmelCase_: List[Any] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
UpperCAmelCase_: Optional[int] = False
UpperCAmelCase_: Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""", safety_checker=SCREAMING_SNAKE_CASE_, torch_dtype=torch.floataa )
UpperCAmelCase_: List[str] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Optional[Any] = self.get_inputs()
pipe(**SCREAMING_SNAKE_CASE_, callback=SCREAMING_SNAKE_CASE_, callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __snake_case (self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_: Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""", safety_checker=SCREAMING_SNAKE_CASE_, torch_dtype=torch.floataa )
UpperCAmelCase_: Any = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_: Union[str, Any] = self.get_inputs()
UpperCAmelCase_: Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __snake_case (self ) -> int:
UpperCAmelCase_: Optional[int] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase_: int = inputs["""image"""].resize((504, 504) )
UpperCAmelCase_: Any = """timbrooks/instruct-pix2pix"""
UpperCAmelCase_: int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_, safety_checker=SCREAMING_SNAKE_CASE_, )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Any = pipe(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = output.images[0]
UpperCAmelCase_: int = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
UpperCAmelCase_: str = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 82
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    """simple docstring"""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    """simple docstring"""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    """simple docstring"""
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    """simple docstring"""
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , lowerCAmelCase__ )
def lowerCAmelCase_ (lowerCAmelCase__: Tuple , lowerCAmelCase__: List[str] , lowerCAmelCase__: Dict , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Tuple ):
"""simple docstring"""
UpperCAmelCase_: int = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
UpperCAmelCase_: Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase_: str = F'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCAmelCase__ )
UpperCAmelCase_: Optional[int] = fsspec.filesystem(compression_fs_class.protocol , fo=lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = os.path.basename(lowerCAmelCase__ )
UpperCAmelCase_: Optional[Any] = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as f, open(lowerCAmelCase__ , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def lowerCAmelCase_ (lowerCAmelCase__: int , lowerCAmelCase__: Tuple , lowerCAmelCase__: Any ):
"""simple docstring"""
UpperCAmelCase_: List[str] = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
UpperCAmelCase_: Tuple = compressed_file_paths[protocol]
UpperCAmelCase_: int = """dataset.jsonl"""
UpperCAmelCase_: Any = F'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase_ , *UpperCAmelCase_: Dict = fsspec.get_fs_token_paths(lowerCAmelCase__ )
assert fs.isfile(lowerCAmelCase__ )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    """simple docstring"""
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    """simple docstring"""
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 82
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    '''simple docstring'''
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    '''simple docstring'''
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__SCREAMING_SNAKE_CASE = []
for i in range(len(self.block_out_channels ) - 1 ):
__SCREAMING_SNAKE_CASE = self.block_out_channels[i]
__SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1]
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
__SCREAMING_SNAKE_CASE = blocks
__SCREAMING_SNAKE_CASE = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conv_in(_A )
__SCREAMING_SNAKE_CASE = nn.silu(_A )
for block in self.blocks:
__SCREAMING_SNAKE_CASE = block(_A )
__SCREAMING_SNAKE_CASE = nn.silu(_A )
__SCREAMING_SNAKE_CASE = self.conv_out(_A )
return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    '''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (1, self.in_channels, self.sample_size, self.sample_size)
__SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = jnp.ones((1,) , dtype=jnp.intaa )
__SCREAMING_SNAKE_CASE = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = (1, 3, self.sample_size * 8, self.sample_size * 8)
__SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = jax.random.split(_A )
__SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng}
return self.init(_A , _A , _A , _A , _A )["params"]
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.block_out_channels
__SCREAMING_SNAKE_CASE = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__SCREAMING_SNAKE_CASE = self.num_attention_heads or self.attention_head_dim
# input
__SCREAMING_SNAKE_CASE = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__SCREAMING_SNAKE_CASE = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__SCREAMING_SNAKE_CASE = FlaxTimestepEmbedding(_A , dtype=self.dtype )
__SCREAMING_SNAKE_CASE = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__SCREAMING_SNAKE_CASE = self.only_cross_attention
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = (num_attention_heads,) * len(self.down_block_types )
# down
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = block_out_channels[0]
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
for i, down_block_type in enumerate(self.down_block_types ):
__SCREAMING_SNAKE_CASE = output_channel
__SCREAMING_SNAKE_CASE = block_out_channels[i]
__SCREAMING_SNAKE_CASE = i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__SCREAMING_SNAKE_CASE = FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__SCREAMING_SNAKE_CASE = FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
for _ in range(self.layers_per_block ):
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
if not is_final_block:
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
__SCREAMING_SNAKE_CASE = down_blocks
__SCREAMING_SNAKE_CASE = controlnet_down_blocks
# mid
__SCREAMING_SNAKE_CASE = block_out_channels[-1]
__SCREAMING_SNAKE_CASE = FlaxUNetMidBlockaDCrossAttn(
in_channels=_A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A , _A , _A , _A , _A = 1.0 , _A = True , _A = False , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__SCREAMING_SNAKE_CASE = jnp.flip(_A , axis=1 )
# 1. time
if not isinstance(_A , jnp.ndarray ):
__SCREAMING_SNAKE_CASE = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE = timesteps.astype(dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = jnp.expand_dims(_A , 0 )
__SCREAMING_SNAKE_CASE = self.time_proj(_A )
__SCREAMING_SNAKE_CASE = self.time_embedding(_A )
# 2. pre-process
__SCREAMING_SNAKE_CASE = jnp.transpose(_A , (0, 2, 3, 1) )
__SCREAMING_SNAKE_CASE = self.conv_in(_A )
__SCREAMING_SNAKE_CASE = jnp.transpose(_A , (0, 2, 3, 1) )
__SCREAMING_SNAKE_CASE = self.controlnet_cond_embedding(_A )
sample += controlnet_cond
# 3. down
__SCREAMING_SNAKE_CASE = (sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = down_block(_A , _A , _A , deterministic=not train )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__SCREAMING_SNAKE_CASE = self.mid_block(_A , _A , _A , deterministic=not train )
# 5. contronet blocks
__SCREAMING_SNAKE_CASE = ()
for down_block_res_sample, controlnet_block in zip(_A , self.controlnet_down_blocks ):
__SCREAMING_SNAKE_CASE = controlnet_block(_A )
controlnet_down_block_res_samples += (down_block_res_sample,)
__SCREAMING_SNAKE_CASE = controlnet_down_block_res_samples
__SCREAMING_SNAKE_CASE = self.controlnet_mid_block(_A )
# 6. scaling
__SCREAMING_SNAKE_CASE = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_A , mid_block_res_sample=_A )
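# Initialization sketch (hedged: names follow the public diffusers Flax API
# that the obfuscated identifiers above correspond to). Parameters come from
# `init_weights` with a JAX PRNG key:
#
#   controlnet = FlaxControlNetModel(sample_size=32, in_channels=4)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))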
| 257
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    '''simple docstring'''
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        '''simple docstring'''
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class UpperCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Any = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
UpperCamelCase__ : int = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase__ : Optional[Any] = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase__ : List[str] = True
UpperCamelCase__ : Tuple = False
UpperCamelCase__ : Union[str, Any] = False
def _A ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFMBartModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A )
def _A ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Any = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
UpperCamelCase__ : str = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
UpperCamelCase__ : List[str] = '''facebook/mbart-large-en-ro'''
@cached_property
def _A ( self ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _A ( self , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.translate_src_text(**_A )
self.assertListEqual(self.expected_text , _A )
def _A ( self , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **_A , return_tensors='tf' )
__SCREAMING_SNAKE_CASE = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
return generated_words
@slow
def _A ( self ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 257
| 1
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    '''simple docstring'''
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
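# Usage sketch: coordinates in decimal degrees, result in meters.
#
#   d = lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.713019, -74.012647)
#   # San Francisco to New York, roughly 4.1e6 m (on the order of the
#   # great-circle distance between the two cities)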
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str = IFInpaintingSuperResolutionPipeline
snake_case__ : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
snake_case__ : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
snake_case__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
return self._get_superresolution_dummy_components()
def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str]=0 ) -> List[str]:
if str(UpperCAmelCase__ ).startswith("mps" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase_ ( self : str ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase_ ( self : str ) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
self._test_save_load_local()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 195
| 1
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __UpperCamelCase :
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
lowerCamelCase_ =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDConditionModel(
sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
], mid_block_type='''UNetMidBlock2DSimpleCrossAttn''', up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='''text''', addition_embed_type_num_heads=2, cross_attention_norm='''group_norm''', resnet_time_scale_shift='''scale_shift''', act_fn='''gelu''', )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCamelCase_ =DDPMScheduler(
num_train_timesteps=1_000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0_0_0_1, beta_end=0.0_2, thresholding=lowerCAmelCase, dynamic_thresholding_ratio=0.9_5, sample_max_value=1.0, prediction_type='''epsilon''', variance_type='''learned_range''', )
torch.manual_seed(0 )
lowerCamelCase_ =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
lowerCamelCase_ =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDConditionModel(
sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
], mid_block_type='''UNetMidBlock2DSimpleCrossAttn''', up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='''text''', addition_embed_type_num_heads=2, cross_attention_norm='''group_norm''', resnet_time_scale_shift='''scale_shift''', act_fn='''gelu''', class_embed_type='''timestep''', mid_block_scale_factor=1.4_1_4, time_embedding_act_fn='''gelu''', time_embedding_dim=32, )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCamelCase_ =DDPMScheduler(
num_train_timesteps=1_000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0_0_0_1, beta_end=0.0_2, thresholding=lowerCAmelCase, dynamic_thresholding_ratio=0.9_5, sample_max_value=1.0, prediction_type='''epsilon''', variance_type='''learned_range''', )
torch.manual_seed(0 )
lowerCamelCase_ =DDPMScheduler(
num_train_timesteps=1_000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0_0_0_1, beta_end=0.0_2, )
torch.manual_seed(0 )
lowerCamelCase_ =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        image = inputs["image"] if "image" in inputs else None
        mask_image = inputs["mask_image"] if "mask_image" in inputs else None
        original_image = inputs["original_image"] if "original_image" in inputs else None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 75
|
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10000) -> int:
    """
    Returns the count of candidate Lychrel numbers below `limit`: numbers that
    do not form a palindrome within 50 reverse-and-add iterations.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
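# Worked example (illustrative comment, not part of the solution above):
# 47 is not a Lychrel candidate, since a single reverse-and-add step already
# yields a palindrome: 47 + 74 = 121. By contrast, 196 is the classic
# candidate that never reaches a palindrome within the 50-iteration cutoff:
#
#   sum_reverse(47)   # -> 121; is_palindrome(121) is True
#   solution(10000)   # counts the numbers below 10000 that behave like 196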
| 201
| 0
|
""" MGP-STR model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27,
        num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0,
        qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0,
        attn_drop_rate=0.0, drop_path_rate=0.0, output_aa_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions  # `output_a3_attentions` upstream
        self.initializer_range = initializer_range
| 287
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
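# Minimal sketch of the mechanism this script showcases (illustrative only,
# not part of the original example): `find_executable_batch_size` reruns the
# decorated function with a halved batch size every time it raises an
# out-of-memory error, e.g.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # OOM at 128 -> automatically retried with 64, then 32, ...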
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 287
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs=None, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
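    # Illustrative layout (comment added for clarity): for token id sequences
    # A and B, the method above returns `<s> A </s></s> B </s>`, the pair
    # format CamemBERT inherits from RoBERTa.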
def A_ ( self , snake_case , snake_case = None , snake_case = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1]
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : Dict = [self.sep_token_id]
UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self , snake_case ):
'''simple docstring'''
return self.sp_model.encode(snake_case , out_type=snake_case )
def A_ ( self , snake_case ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(snake_case ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(snake_case )
def A_ ( self , snake_case ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Any = ""
UpperCAmelCase : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case ) + token
UpperCAmelCase : List[str] = True
UpperCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(snake_case )
UpperCAmelCase : int = False
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def __getstate__( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.__dict__.copy()
UpperCAmelCase : Any = None
return state
def __setstate__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase : Tuple = {}
UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase : Tuple = os.path.join(
snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , "wb" ) as fi:
UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
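# Minimal usage sketch (illustrative; assumes the fully named upstream methods
# and a local SentencePiece model file):
#
#   tokenizer = CamembertTokenizer(vocab_file="sentencepiece.bpe.model")
#   ids = tokenizer("Le camembert est délicieux !")["input_ids"]
#   tokenizer.save_vocabulary("./saved")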
| 311
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase : Tuple = 192
UpperCAmelCase : str = 768
UpperCAmelCase : List[Any] = 12
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = [800, 1333]
UpperCAmelCase : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Union[str, Any] = 330
UpperCAmelCase : Union[str, Any] = 14
UpperCAmelCase : Any = 6
UpperCAmelCase : int = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase : Union[str, Any] = 384
UpperCAmelCase : Dict = 1536
UpperCAmelCase : str = 12
UpperCAmelCase : List[str] = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase : int = [800, 1344]
UpperCAmelCase : Optional[int] = 91
UpperCAmelCase : int = "huggingface/label-files"
UpperCAmelCase : Union[str, Any] = "coco-detection-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase : str = {int(__magic_name__ ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : str = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
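# Illustrative shape bookkeeping (hypothetical size, comment only): with
# hidden_size = 384, `blocks.{i}.attn.qkv.weight` has shape (3 * 384, 384);
# rows [0:384] become the query projection, [384:768] the key projection and
# [768:1152] the value projection, matching the slices above.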
def lowercase ( __magic_name__ ):
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase : int = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase : int = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : int = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase : Tuple = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase : Optional[int] = orig_state_dict.pop(__magic_name__ )
if "qkv" in key:
UpperCAmelCase : str = key.split("." )
UpperCAmelCase : List[Any] = int(key_split[2] )
UpperCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase : Optional[int] = val[:dim, :]
UpperCAmelCase : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : Any = val[-dim:, :]
else:
UpperCAmelCase : Tuple = val[:dim]
UpperCAmelCase : List[str] = val[dim : dim * 2]
UpperCAmelCase : Any = val[-dim:]
else:
UpperCAmelCase : Union[str, Any] = val
return orig_state_dict
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Tuple = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_yolos_config(__magic_name__ )
# load original state_dict
UpperCAmelCase : int = torch.load(__magic_name__ , map_location="cpu" )["model"]
# load 🤗 model
UpperCAmelCase : int = YolosForObjectDetection(__magic_name__ )
model.eval()
UpperCAmelCase : Dict = convert_state_dict(__magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase : Dict = 800 if yolos_name != "yolos_ti" else 512
UpperCAmelCase : int = YolosImageProcessor(format="coco_detection" , size=__magic_name__ )
UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase : List[str] = model(**__magic_name__ )
UpperCAmelCase , UpperCAmelCase : Optional[int] = outputs.logits, outputs.pred_boxes
UpperCAmelCase , UpperCAmelCase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase : str = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
UpperCAmelCase : Tuple = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
UpperCAmelCase : List[str] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase : List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
UpperCAmelCase : Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase : Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
UpperCAmelCase : List[Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
UpperCAmelCase : str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , __magic_name__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __magic_name__ , atol=1e-4 )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__magic_name__ )
if push_to_hub:
UpperCAmelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
UpperCAmelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(__magic_name__ , organization="hustvl" )
model.push_to_hub(__magic_name__ , organization="hustvl" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 311
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1,
        initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute",
        block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
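# Minimal usage sketch (illustrative):
#
#   config = MraConfig()  # defaults mirror roberta-base-sized MRA checkpoints
#   assert config.model_type == "mra"
#   tiny = MraConfig(num_hidden_layers=2, hidden_size=128)  # a toy variant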
| 252
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
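# Illustrative call sites (not part of this module):
#
#   dep_version_check("tokenizers")  # raises if the installed version violates
#                                    # the pin in dependency_versions_table.py
#   dep_version_check("numpy", "pip install -U numpy")  # with a custom hint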
| 252
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__: str = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: int = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: Union[str, Any] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: int = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
UpperCamelCase__: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__: str = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: int = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: Union[str, Any] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: int = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
UpperCamelCase__: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23
| 1
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
a__: Dict = logging.get_logger(__name__)
a__: int = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
a__: str = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def UpperCamelCase__( UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] )->List[str]:
for attribute in key.split('''.''' ):
A__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
A__ = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
A__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
A__ = value
elif weight_type == "weight_g":
A__ = value
elif weight_type == "weight_v":
A__ = value
elif weight_type == "bias":
A__ = value
else:
A__ = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCamelCase__( UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] )->Dict:
A__ = []
A__ = fairseq_model.state_dict()
A__ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
A__ = True
else:
for key, mapped_key in MAPPING.items():
A__ = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
A__ = True
if "*" in mapped_key:
A__ = name.split(UpperCamelCase__ )[0].split('''.''' )[-2]
A__ = mapped_key.replace('''*''' , UpperCamelCase__ )
if "weight_g" in name:
A__ = '''weight_g'''
elif "weight_v" in name:
A__ = '''weight_v'''
elif "bias" in name:
A__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A__ = '''weight'''
else:
A__ = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(f"Unused weights: {unused_weights}" )
def UpperCamelCase__( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int )->Optional[int]:
A__ = full_name.split('''conv_layers.''' )[-1]
A__ = name.split('''.''' )
A__ = int(items[0] )
A__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
A__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
A__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
A__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
A__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def UpperCamelCase__( UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : int=True )->Any:
if config_path is not None:
A__ = UniSpeechSatConfig.from_pretrained(UpperCamelCase__ )
else:
A__ = UniSpeechSatConfig()
A__ = ''''''
if is_finetuned:
A__ = UniSpeechSatForCTC(UpperCamelCase__ )
else:
A__ = UniSpeechSatForPreTraining(UpperCamelCase__ )
A__ , A__ , A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
A__ = model[0].eval()
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ )
hf_wavavec.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
a__: int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a__: Optional[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 359
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def __call__( self,*__lowerCamelCase,**__lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase,**__lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
A__ = kwargs.pop('''raw_speech''' )
else:
A__ = kwargs.pop('''audio''',__lowerCamelCase )
A__ = kwargs.pop('''sampling_rate''',__lowerCamelCase )
A__ = kwargs.pop('''text''',__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
A__ = args[0]
A__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
A__ = self.feature_extractor(__lowerCamelCase,*__lowerCamelCase,sampling_rate=__lowerCamelCase,**__lowerCamelCase )
if text is not None:
A__ = self.tokenizer(__lowerCamelCase,**__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A__ = encodings['''input_ids''']
return inputs
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
return self.tokenizer.batch_decode(*__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__lowerCamelCase,**__lowerCamelCase )
A__ = kwargs.pop('''input_features''',__lowerCamelCase )
A__ = kwargs.pop('''labels''',__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
A__ = args[0]
A__ = args[1:]
if input_features is not None:
A__ = self.feature_extractor.pad(__lowerCamelCase,*__lowerCamelCase,**__lowerCamelCase )
if labels is not None:
A__ = self.tokenizer.pad(__lowerCamelCase,**__lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A__ = labels['''input_ids''']
return input_features
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
return self.tokenizer.decode(*__lowerCamelCase,**__lowerCamelCase )
@contextmanager
def UpperCamelCase ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
A__ = True
A__ = self.tokenizer
yield
A__ = self.feature_extractor
A__ = False
| 39
| 0
|
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """
    Determine whether the string is a valid Sri Lankan mobile phone number.
    """
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")

    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"

    print(is_sri_lankan_phone_number(phone))
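# A few examples of what the pattern accepts and rejects (illustrative):
#
#   is_sri_lankan_phone_number("0094702343221")  # True  (0094 + 70 + 7 digits)
#   is_sri_lankan_phone_number("+94773283048")   # True  (+94 + 77 + 7 digits)
#   is_sri_lankan_phone_number("0112345678")     # False (no mobile 7x prefix)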
| 71
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class _lowerCamelCase ( _lowercase ):
def __init__(self , *__a , **__a ) -> None:
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 153
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
    mode = "token-classification"
def __init__( self , __lowercase) -> str:
if type(__lowercase) == dict:
__UpperCamelCase :List[Any] = Namespace(**__lowercase)
__UpperCamelCase :Dict = import_module('''tasks''')
try:
__UpperCamelCase :str = getattr(__lowercase , hparams.task_type)
__UpperCamelCase :TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")
__UpperCamelCase :Tuple = self.token_classification_task.get_labels(hparams.labels)
__UpperCamelCase :Tuple = CrossEntropyLoss().ignore_index
super().__init__(__lowercase , len(self.labels) , self.mode)
def UpperCamelCase__ ( self , **__lowercase) -> List[Any]:
return self.model(**__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Any:
__UpperCamelCase :str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__UpperCamelCase :Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
__UpperCamelCase :Dict = self(**__lowercase)
__UpperCamelCase :str = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :List[Any] = self.hparams
for mode in ["train", "dev", "test"]:
__UpperCamelCase :int = self._feature_file(__lowercase)
if os.path.exists(__lowercase) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __lowercase)
__UpperCamelCase :Any = torch.load(__lowercase)
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir)
__UpperCamelCase :Any = self.token_classification_task.read_examples_from_file(args.data_dir , __lowercase)
__UpperCamelCase :Union[str, Any] = self.token_classification_task.convert_examples_to_features(
__lowercase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet''']) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(self.config.model_type in ['''xlnet''']) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __lowercase)
torch.save(__lowercase , __lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = False) -> DataLoader:
__UpperCamelCase :Tuple = self._feature_file(__lowercase)
logger.info('''Loading features from cached file %s''' , __lowercase)
__UpperCamelCase :str = torch.load(__lowercase)
__UpperCamelCase :int = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
__UpperCamelCase :Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
if features[0].token_type_ids is not None:
__UpperCamelCase :str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
else:
__UpperCamelCase :Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long)
# HACK(we will not use this anymore soon)
__UpperCamelCase :int = torch.tensor([f.label_ids for f in features] , dtype=torch.long)
return DataLoader(
TensorDataset(__lowercase , __lowercase , __lowercase , __lowercase) , batch_size=__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Dict:
"""Compute validation""" ""
__UpperCamelCase :int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__UpperCamelCase :Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
__UpperCamelCase :Any = self(**__lowercase)
__UpperCamelCase , __UpperCamelCase :Tuple = outputs[:2]
__UpperCamelCase :List[str] = logits.detach().cpu().numpy()
__UpperCamelCase :List[str] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __lowercase) -> List[str]:
__UpperCamelCase :Tuple = torch.stack([x['''val_loss'''] for x in outputs]).mean()
__UpperCamelCase :str = np.concatenate([x['''pred'''] for x in outputs] , axis=0)
__UpperCamelCase :Any = np.argmax(__lowercase , axis=2)
__UpperCamelCase :str = np.concatenate([x['''target'''] for x in outputs] , axis=0)
__UpperCamelCase :List[str] = dict(enumerate(self.labels))
__UpperCamelCase :Tuple = [[] for _ in range(out_label_ids.shape[0])]
__UpperCamelCase :Any = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __lowercase) -> int:
# when stable
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = self._eval_end(__lowercase)
__UpperCamelCase :Tuple = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __lowercase) -> int:
# updating to test_epoch_end instead of deprecated test_end
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[int] = self._eval_end(__lowercase)
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
__UpperCamelCase :Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __lowercase , __lowercase) -> Union[str, Any]:
# Add NER specific options
BaseTransformer.add_model_specific_args(__lowercase , __lowercase)
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__lowercase , help='''Task type to fine tune in training (e.g. NER, POS, etc)''')
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__lowercase , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__lowercase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''')
return parser
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__lowercase = NERTransformer.add_model_specific_args(parser, os.getcwd())
__lowercase = parser.parse_args()
__lowercase = NERTransformer(args)
__lowercase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__lowercase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
__lowercase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 105
| 1
|