code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import requests
__lowerCamelCase = """YOUR API KEY"""
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str = giphy_api_key ):
snake_case : Tuple = "+".join(query.split() )
snake_case : Union[str, Any] = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
snake_case : Optional[Any] = requests.get(__lowerCamelCase ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 59 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any:
lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[int] = ""
else:
lowerCamelCase : List[str] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = val
def A ( ) -> List[str]:
lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = DeiTConfig()
# all deit models have fine-tuned heads
lowerCamelCase : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowerCamelCase : Dict = 1000
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Dict = int(deit_name[-6:-4] )
lowerCamelCase : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowerCamelCase : Optional[Any] = 192
lowerCamelCase : List[str] = 768
lowerCamelCase : Tuple = 12
lowerCamelCase : Optional[Any] = 3
elif deit_name[9:].startswith("small" ):
lowerCamelCase : str = 384
lowerCamelCase : Optional[Any] = 1536
lowerCamelCase : Dict = 12
lowerCamelCase : Optional[int] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowerCamelCase : str = 1024
lowerCamelCase : List[str] = 4096
lowerCamelCase : Any = 24
lowerCamelCase : Dict = 16
# load original model from timm
lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase : Dict = timm_model.state_dict()
lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
lowerCamelCase : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size )
lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
lowerCamelCase : int = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 48 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class A :
def __init__(self , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=6_4 , lowerCAmelCase=None ):
__lowercase= np.random.default_rng(lowerCAmelCase )
__lowercase= length
__lowercase= rng.normal(size=(length,) ).astype(np.floataa )
__lowercase= a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__(self ):
return self.length
def __getitem__(self , lowerCAmelCase ):
return {"x": self.x[i], "y": self.y[i]}
class A ( torch.nn.Module ):
def __init__(self , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=False ):
super().__init__()
__lowercase= torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__lowercase= torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__lowercase= True
def _A (self , lowerCAmelCase=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
__lowercase= False
return x * self.a[0] + self.b[0]
class A ( torch.nn.Module ):
def __init__(self , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=False ):
super().__init__()
__lowercase= torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
__lowercase= torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
__lowercase= True
def _A (self , lowerCAmelCase=None ):
if self.first_batch:
print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
__lowercase= False
return x * self.a + self.b
def _lowerCamelCase( lowercase__ , lowercase__ = 1_6 ) -> str:
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
__lowercase= AutoTokenizer.from_pretrained('bert-base-cased' )
__lowercase= {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
__lowercase= load_dataset('csv' , data_files=lowercase__ )
__lowercase= datasets['train'].unique('label' )
__lowercase= {v: i for i, v in enumerate(lowercase__ )}
def tokenize_function(lowercase__ ):
# max_length=None => use the model max length (it's actually the default)
__lowercase= tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' )
if "label" in examples:
__lowercase= [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowercase= datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(lowercase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return tokenizer.pad(lowercase__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__lowercase= DataLoader(tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=2 )
__lowercase= DataLoader(tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=1 )
return train_dataloader, eval_dataloader
| 304 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Tuple =StableDiffusionDiffEditPipeline
lowercase : List[str] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowercase : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowercase : Tuple =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase : Union[str, Any] =frozenset([] )
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase, )
lowerCamelCase_ =DDIMScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''', clip_sample=lowerCAmelCase, set_alpha_to_one=lowerCAmelCase, )
lowerCamelCase_ =DDIMInverseScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''', clip_sample=lowerCAmelCase, set_alpha_to_zero=lowerCAmelCase, )
torch.manual_seed(0 )
lowerCamelCase_ =AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='''gelu''', projection_dim=512, )
lowerCamelCase_ =CLIPTextModel(lowerCAmelCase )
lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ ={
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 16, 16), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowerCamelCase_ =floats_tensor((1, 2, 4, 16, 16), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase_ =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase_ =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
if not hasattr(self.pipeline_class, '''_optional_components''' ):
return
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =pipe(**lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =self.pipeline_class.from_pretrained(lowerCAmelCase )
pipe_loaded.to(lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase, lowerCAmelCase ) is None, f'''`{optional_component}` did not stay set to None after loading.''', )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =pipe_loaded(**lowerCAmelCase )[0]
lowerCamelCase_ =np.abs(output - output_loaded ).max()
self.assertLess(lowerCAmelCase, 1e-4 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_mask_inputs(lowerCAmelCase )
lowerCamelCase_ =pipe.generate_mask(**lowerCAmelCase )
lowerCamelCase_ =mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
lowerCamelCase_ =np.array([0] * 9 )
lowerCamelCase_ =np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase, 1e-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inversion_inputs(lowerCAmelCase )
lowerCamelCase_ =pipe.invert(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
lowerCamelCase_ =np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9], )
lowerCamelCase_ =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase, 1e-3 )
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ ={'''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''beta_schedule''': '''scaled_linear'''}
lowerCamelCase_ =DPMSolverMultistepScheduler(**lowerCAmelCase )
lowerCamelCase_ =DPMSolverMultistepInverseScheduler(**lowerCAmelCase )
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inversion_inputs(lowerCAmelCase )
lowerCamelCase_ =pipe.invert(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
lowerCamelCase_ =np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9], )
lowerCamelCase_ =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase, 1e-3 )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
lowerCamelCase_ =raw_image.convert('''RGB''' ).resize((768, 768) )
lowerCamelCase_ =raw_image
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowerCamelCase_ =DDIMScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ =DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ ='''a bowl of fruit'''
lowerCamelCase_ ='''a bowl of pears'''
lowerCamelCase_ =pipe.generate_mask(
image=self.raw_image, source_prompt=lowerCAmelCase, target_prompt=lowerCAmelCase, generator=lowerCAmelCase, )
lowerCamelCase_ =pipe.invert(
prompt=lowerCAmelCase, image=self.raw_image, inpaint_strength=0.7, generator=lowerCAmelCase ).latents
lowerCamelCase_ =pipe(
prompt=lowerCAmelCase, mask_image=lowerCAmelCase, image_latents=lowerCAmelCase, generator=lowerCAmelCase, negative_prompt=lowerCAmelCase, inpaint_strength=0.7, output_type='''numpy''', ).images[0]
lowerCamelCase_ =(
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowerCamelCase_ =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ ='''a bowl of fruit'''
lowerCamelCase_ ='''a bowl of pears'''
lowerCamelCase_ =pipe.generate_mask(
image=self.raw_image, source_prompt=lowerCAmelCase, target_prompt=lowerCAmelCase, generator=lowerCAmelCase, )
lowerCamelCase_ =pipe.invert(
prompt=lowerCAmelCase, image=self.raw_image, inpaint_strength=0.7, generator=lowerCAmelCase, num_inference_steps=25, ).latents
lowerCamelCase_ =pipe(
prompt=lowerCAmelCase, mask_image=lowerCAmelCase, image_latents=lowerCAmelCase, generator=lowerCAmelCase, negative_prompt=lowerCAmelCase, inpaint_strength=0.7, num_inference_steps=25, output_type='''numpy''', ).images[0]
lowerCamelCase_ =(
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 75 |
'''simple docstring'''
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 237 | 0 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Any = tmp_path / """file.csv"""
snake_case_ : Any = textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
snake_case_ : Optional[int] = tmp_path / """malformed_file.csv"""
snake_case_ : int = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ):
'''simple docstring'''
snake_case_ : str = tmp_path / """csv_with_image.csv"""
snake_case_ : int = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Any ):
'''simple docstring'''
snake_case_ : int = tmp_path / """csv_with_label.csv"""
snake_case_ : Tuple = textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv"""
snake_case_ : str = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(lowerCamelCase_ , """w""" ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ):
'''simple docstring'''
snake_case_ : int = Csv()
snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(lowerCamelCase_ ) in record.message
for record in caplog.records )
@require_pil
def UpperCAmelCase ( lowerCamelCase_ :Tuple ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
snake_case_ : Tuple = f.read().splitlines()[1]
snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] )
snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
snake_case_ : List[str] = pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCAmelCase ( lowerCamelCase_ :int ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding="""utf-8""" ) as f:
snake_case_ : List[Any] = f.read().splitlines()[1:]
snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] )
snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
snake_case_ : Union[str, Any] = pa_table.to_pydict()["""label"""]
assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(lowerCamelCase_ ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """A converter can split a whitespace-separated column into a list of ints."""
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
'''simple docstring'''
import functools
def UpperCAmelCase(worda: str, wordb: str) -> int:
    """Return the Levenshtein edit distance between `worda` and `wordb`.

    Uses memoised recursion over a pair of indices into the two words.

    >>> UpperCAmelCase("intention", "execution")
    5
    >>> UpperCAmelCase("intention", "")
    9
    >>> UpperCAmelCase("", "")
    0
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all remaining from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all remaining from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from the first word
            1 + min_distance(indexa, indexb + 1),  # insert into the first word
            diff + min_distance(indexa + 1, indexb + 1),  # substitute (free on match)
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
# indentation level used for all generated JSON files
json_indent = 2

# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}

# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def A(d):
    """Rewrite fairseq BPE dict keys for the transformers FSMT vocab format.

    (1) remove the '@@' word-continuation marker, (2) append '</w>' where the
    word is not broken up, e.g.:
        {'le@@': 5, 'tt@@': 6, 'er': 7}  ->  {'le': 5, 'tt': 6, 'er</w>': 7}
    The special tokens <s> <pad> </s> <unk> (which must be present in `d`)
    are restored verbatim, without the '</w>' suffix.
    """
    d2 = {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else f"{k}</w>"): v
        for k, v in d.items()
    }
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens: drop the '</w>'-suffixed entry and re-add
    # the key unchanged with its original index
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


# descriptive name used by the conversion routine below
rewrite_dict_keys = A
def A(fsmt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq FSMT checkpoint into a transformers FSMT dump.

    Writes vocab-src.json / vocab-tgt.json, the merges file, config.json,
    the tokenizer config and the pytorch weights into
    `pytorch_dump_folder_path`.
    """
    json_indent = 2  # indentation for all generated json files

    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok (embed_positions are rebuilt, hence strict=False)
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")


# descriptive name expected by the CLI entry point at the bottom of this file
convert_fsmt_checkpoint_to_pytorch = A
if __name__ == "__main__":
    # CLI entry point: convert one fairseq FSMT checkpoint directory dump.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fsmt_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help=(
            'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
            ' bpecodes, etc.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 48 |
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : int = 1000 ):
    """Project Euler #1: sum of all multiples of 3 or 5 strictly below the limit."""
    return sum(e for e in range(3 , SCREAMING_SNAKE_CASE_ ) if e % 3 == 0 or e % 5 == 0 )


# descriptive alias used by the demo below (the original demo called an
# otherwise-undefined `solution`)
solution = __a


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 158 | 0 |
"""simple docstring"""
from __future__ import annotations
def A(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Fractional knapsack: greedily fill by descending value/weight ratio.

    Returns ``(max_value, fractions)`` where ``fractions[i]`` is the fraction
    of item ``i`` placed in the knapsack (1 for whole items, a partial
    fraction for the last item that does not fully fit).
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # consider the most valuable-per-unit-weight items first
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # the whole item fits
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # take only the fraction that still fits, then stop
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 263 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
UpperCamelCase : List[Any] = False
@skip_mps
class __lowerCAmelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for StableDiffusionAttendAndExcitePipeline built from tiny dummy components."""

    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    # Attend-and-Excite adds the per-prompt `token_indices` argument
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        """Force deterministic torch ops so slice comparisons are reproducible."""
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        """Restore the default (non-deterministic) torch behaviour."""
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        """Build a minimal pipeline component dict (tiny UNet/VAE/CLIP)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard call kwargs for the dummy pipeline, seeded per device."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        """One dummy forward pass must reproduce the reference output slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests ( unittest.TestCase ):
    """Slow GPU integration tests against the real CompVis/stable-diffusion-v1-4 weights.

    Renamed from the duplicate fast-test class name so unittest discovers
    both classes instead of the second shadowing the first.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        """fp16 generation must stay close to the stored reference image."""
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to('cuda')
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type='numpy',
        ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy'
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 263 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
# Silence TensorFlow's C++ logging before `import tensorflow` below
# ('3' keeps only FATAL messages); the bare constant alone had no effect.
_a = '3'
os.environ["TF_CPP_MIN_LOG_LEVEL"] = _a

print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)
try:
    import deepspeed

    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)
try:
    import tensorflow as tf

    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
| 17 |
import numpy
class UpperCAmelCase :
    """Feed-forward network with two hidden sigmoid layers (4 and 3 nodes)
    and one sigmoid output node, trained by plain backpropagation on the
    squared error."""

    def __init__( self : Union[str, Any] , input_array : numpy.ndarray , output_array : numpy.ndarray ):
        """Store the training data and draw random initial weights."""
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # input layer (input_array.shape[1] nodes) -> first hidden layer (4 nodes)
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # first hidden layer (4 nodes) -> second hidden layer (3 nodes)
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # second hidden layer (3 nodes) -> output layer (1 node)
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values, initially all zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )

    def feedforward( self : Optional[Any] ):
        """Propagate the stored input through all three weight matrices and
        return the network output."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation( self : Any ):
        """Update all three weight matrices with the squared-error gradient,
        chained backwards from the output layer."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train( self : Optional[Any] , output : numpy.ndarray , iterations : int , give_loss : bool ):
        """Run `iterations` feedforward/backprop cycles; optionally print the
        mean squared error after each cycle."""
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(f"Iteration {iteration} Loss: {loss}" )

    def predict( self : Union[str, Any] , input_arr : numpy.ndarray ):
        """Classify one sample: 1 if the network output exceeds 0.6, else 0."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )


# descriptive alias used by `example()` below
TwoHiddenLayerNeuralNetwork = UpperCAmelCase
def lowerCamelCase__ ( value ):
    """Logistic sigmoid activation, applied element-wise: 1 / (1 + e**-value)."""
    return 1 / (1 + numpy.exp(-value ))


# descriptive alias used by the network class above
sigmoid = lowerCamelCase__
def lowerCamelCase__ ( value ):
    """Sigmoid derivative expressed in terms of the sigmoid's output value."""
    return (value) * (1 - (value))


# descriptive alias used by `back_propagation` above
sigmoid_derivative = lowerCamelCase__
def lowerCamelCase__ ( ):
    """Train the network on a 3-bit truth table and classify the all-ones input.

    Returns 0 or 1 (the prediction for [1, 1, 1]).
    """
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )


# descriptive alias for the demo entry point
example = lowerCamelCase__


if __name__ == "__main__":
    example()
| 187 | 0 |
# Small undirected demo graph (adjacency lists) used by the __main__ example
# at the bottom of this file.
A_ = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}

# descriptive name used by the __main__ block below
demo_graph = A_
def snake_case (graph , start , goal ) -> list[str]:
    """Breadth-first search for one shortest path from `start` to `goal`.

    Returns the path as a list of nodes, or [] when no path exists.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []


# descriptive name used by the __main__ block below
bfs_shortest_path = snake_case
def snake_case (graph , start , target ) -> int:
    """Number of edges on a shortest path from `start` to `target`.

    Returns 0 when start == target and -1 when either node is missing or
    unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            # keep the smallest distance seen for the target
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]


# descriptive name used by the __main__ block below
bfs_shortest_path_distance = snake_case


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, 'G', 'D'))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D'))  # returns 4
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
# (fairseq/TF key fragment, transformers key fragment) replacement pairs,
# applied in order by `rename_state_dict_key` below
# (identical state dict to bart)
A_ = [
    ['memory_attention', 'encoder_attn'],
    ['attention', 'attn'],
    ['/', '.'],
    ['.LayerNorm.gamma', '_layer_norm.weight'],
    ['.LayerNorm.beta', '_layer_norm.bias'],
    ['r.layer_', 'r.layers.'],
    ['output_proj', 'out_proj'],
    ['ffn.dense_1.', 'fc2.'],
    ['ffn.dense.', 'fc1.'],
    ['ffn_layer_norm', 'final_layer_norm'],
    ['kernel', 'weight'],
    ['encoder_layer_norm.', 'encoder.layer_norm.'],
    ['decoder_layer_norm.', 'decoder.layer_norm.'],
    ['embeddings.weights', 'shared.weight'],
]

# descriptive name used by `rename_state_dict_key` below
PATTERNS = A_
def snake_case (k ) -> str:
    """Map a Pegasus TF state-dict key to the transformers naming scheme by
    applying every (old, new) pair in PATTERNS in order."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k


# descriptive name used by `convert_pegasus` below
rename_state_dict_key = snake_case
def snake_case (tf_weights , cfg_updates ) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the renamed TF weights.

    `tf_weights` maps TF variable names to numpy arrays; `cfg_updates`
    overrides the DEFAULTS config entries.
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        # dense / projection weights are stored transposed in TF
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    # biases absent from the TF dump are zero-initialised
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    # embed_positions are recomputed, every other key must match
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model


# descriptive name used by the conversion entry point below
convert_pegasus = snake_case
def snake_case (path="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
    """Load every non-optimizer variable of a TF checkpoint into a
    name -> numpy array dict."""
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    # optimizer slots and the step counter are not model weights
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights


# descriptive name used by the conversion entry point below
get_tf_weights_as_numpy = snake_case
def snake_case (ckpt_path, save_dir) -> None:
    """Convert a TF Pegasus checkpoint into a tokenizer + PyTorch weights under *save_dir*.

    The dataset name is inferred from the checkpoint's parent directory and used
    to look up the task-specific config overrides.

    Fix: the original definition named both parameters identically (a
    SyntaxError) and read locals (``dataset``, ``tok``, ``tf_weights``, ...)
    that were bound to a single throwaway name.
    """
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        # NOTE(review): restored from the upstream conversion script — the
        # "large" checkpoint keeps the full task table in its config; confirm.
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    # Positional embeddings are static (sinusoidal) and re-created at load time,
    # so they are dropped from the serialized state dict.
    sd.pop('model.decoder.embed_positions.weight')
    sd.pop('model.encoder.embed_positions.weight')
    torch.save(sd, Path(save_dir) / 'pytorch_model.bin')
if __name__ == "__main__":
    # Fix: the original bound the parser/args to throwaway names (``A_``) and
    # then read ``parser`` / ``args`` / ``dataset`` unbound.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        # Default output: mirror the checkpoint's dataset directory under ./pegasus/
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
    # NOTE(review): this call name must match the converter function defined
    # above in this file — confirm the def carries this name.
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
# Module-level logger.
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
# Config class name referenced by generated docstrings.
# NOTE(review): this rebinds ``__lowerCAmelCase`` and shadows the logger above;
# upstream these are two distinct names (``logger`` and ``_CONFIG_FOR_DOC``) — confirm.
__lowerCAmelCase : List[str] ="""T5Config"""
def UpperCAmelCase__ ( input_ids: jnp.ndarray , pad_token_id: int , decoder_start_token_id: int ) -> jnp.ndarray:
    """Shift *input_ids* one position to the right for decoder teacher forcing.

    Position 0 becomes ``decoder_start_token_id`` and any ``-100`` label-ignore
    markers carried over from the labels are replaced by ``pad_token_id``.

    Fix: the original def repeated a single parameter name three times (a
    SyntaxError in Python) and its body read ``shifted_input_ids`` /
    ``input_ids`` which were never bound under those names.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # Replace ignored-label positions (-100) with the pad token.
    shifted_input_ids = jnp.where(shifted_input_ids == -1_0_0, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class _A ( __a ):
    # FlaxMT5Model-style wrapper: reuses the flax T5 implementation with an MT5 config.
    # NOTE(review): both class attributes are bound to the same name
    # ``snake_case__`` — the second assignment shadows the first, so 'mt5' is
    # lost. Upstream these are two distinct attributes (``model_type`` and
    # ``config_class``); confirm and restore the names.
    snake_case__ : List[str] = 'mt5'
    snake_case__ : str = MTaConfig
class _A ( __a ):
    # FlaxMT5ForConditionalGeneration-style wrapper over the flax T5 stack.
    # NOTE(review): the second ``snake_case__`` assignment shadows the first;
    # upstream these are the distinct ``model_type`` / ``config_class``
    # attributes — confirm and restore.
    snake_case__ : List[str] = 'mt5'
    snake_case__ : Dict = MTaConfig
class _A ( __a ):
    # FlaxMT5EncoderModel-style wrapper over the flax T5 encoder.
    # NOTE(review): the second ``snake_case__`` assignment shadows the first;
    # upstream these are the distinct ``model_type`` / ``config_class``
    # attributes — confirm and restore.
    snake_case__ : str = 'mt5'
    snake_case__ : Optional[int] = MTaConfig
| 197 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger.
UpperCamelCase__ = logging.get_logger(__name__)
# NOTE(review): every constant below rebinds ``UpperCamelCase__`` (each
# assignment shadows the previous one), while the tokenizer class later reads
# ``VOCAB_FILES_NAMES`` / ``PRETRAINED_VOCAB_FILES_MAP`` /
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` — upstream these are the
# constants' real names; confirm before use.
# Filenames the tokenizer expects inside a checkpoint directory.
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
# Remote vocab/merges URLs for the published BART checkpoints.
UpperCamelCase__ = {
    '''vocab_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
        '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
        '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
        '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
        '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
        '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
    },
}
# Maximum input length (in tokens) per published checkpoint.
UpperCamelCase__ = {
    '''facebook/bart-base''': 1_0_2_4,
    '''facebook/bart-large''': 1_0_2_4,
    '''facebook/bart-large-mnli''': 1_0_2_4,
    '''facebook/bart-large-cnn''': 1_0_2_4,
    '''facebook/bart-large-xsum''': 1_0_2_4,
    '''yjernite/bart_eli5''': 1_0_2_4,
}
@lru_cache()
def a__ ( ) -> dict:
    """Return the GPT-2/BART byte -> unicode character table.

    Every byte (0-255) is mapped to a printable unicode character: printable
    ASCII and Latin-1 bytes map to themselves; all remaining bytes are remapped
    to characters starting at code point 256 so that no byte is rendered as a
    whitespace/control character the BPE would mishandle.

    Fix: the original bound ``bs``/``cs``/``n`` to one throwaway name and then
    read them unbound, and its ``List[Any]`` annotation referenced an
    unimported ``Any``.
    """
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def a__ ( lowerCAmelCase__ ) -> set:
    """Return the set of adjacent symbol pairs in *lowerCAmelCase__*.

    The argument is a word represented as a sequence of symbols (e.g. a string
    or a tuple of variable-length strings, as used by the BPE merge loop).

    Fix: the original read ``pairs`` / ``word`` / ``prev_char`` which were
    never bound (all assignments went to one throwaway name), and its
    ``Union`` annotation referenced an unimported name.
    """
    pairs = set()
    prev_char = lowerCAmelCase__[0]
    for char in lowerCAmelCase__[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCamelCase_ ( __a ):
    """Byte-level Byte-Pair-Encoding tokenizer for BART (GPT-2 style).

    NOTE(review): this class cannot run as written — every ``__init__``
    parameter is named ``_A`` (duplicate argument names are a SyntaxError in
    Python), all public methods below share the single name ``lowercase_``
    (each definition shadows the previous one), many statements bind
    ``UpperCAmelCase__`` while later lines read the upstream variable names,
    and the class-level constants all rebind ``lowerCAmelCase__``.  Upstream
    these are distinct identifiers — confirm against the original
    ``tokenization_bart.py`` before use.  The docstrings below describe the
    intended behaviour of each method.
    """
    lowerCAmelCase__ = VOCAB_FILES_NAMES
    lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase__ = ['input_ids', 'attention_mask']

    def __init__( self : Optional[int] , _A : Optional[int] , _A : List[Any] , _A : int="replace" , _A : List[Any]="<s>" , _A : List[Any]="</s>" , _A : List[Any]="</s>" , _A : Optional[int]="<s>" , _A : List[str]="<unk>" , _A : List[str]="<pad>" , _A : Union[str, Any]="<mask>" , _A : Any=False , **_A : Dict , ):
        """Load the JSON vocab and merges file and build the byte-level BPE tables."""
        # Wrap plain-string special tokens in AddedToken so strip behaviour is explicit.
        UpperCAmelCase__ : Dict = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token
        UpperCAmelCase__ : Any = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token
        UpperCAmelCase__ : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token
        UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token
        UpperCAmelCase__ : int = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token
        UpperCAmelCase__ : Optional[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase__ : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
        super().__init__(
            errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , )
        with open(_A , encoding='''utf-8''' ) as vocab_handle:
            UpperCAmelCase__ : Optional[Any] = json.load(_A )
        UpperCAmelCase__ : Any = {v: k for k, v in self.encoder.items()}
        UpperCAmelCase__ : List[str] = errors  # how to handle errors in decoding
        UpperCAmelCase__ : str = bytes_to_unicode()
        UpperCAmelCase__ : Dict = {v: k for k, v in self.byte_encoder.items()}
        with open(_A , encoding='''utf-8''' ) as merges_handle:
            # First line is a version header, last split element is empty.
            UpperCAmelCase__ : str = merges_handle.read().split('''\n''' )[1:-1]
        UpperCAmelCase__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
        UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) )
        UpperCAmelCase__ : Optional[int] = {}
        UpperCAmelCase__ : int = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        UpperCAmelCase__ : List[Any] = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )

    @property
    def lowercase_ ( self : int ):
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder )

    def lowercase_ ( self : Tuple ):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def lowercase_ ( self : List[Any] , _A : Tuple ):
        """Apply byte-pair-encoding merges to a single pre-token, with memoisation."""
        if token in self.cache:
            return self.cache[token]
        UpperCAmelCase__ : Optional[Any] = tuple(_A )
        UpperCAmelCase__ : Dict = get_pairs(_A )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (earliest-learned) pair first.
            UpperCAmelCase__ : Optional[Any] = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCAmelCase__ , UpperCAmelCase__ : str = bigram
            UpperCAmelCase__ : int = []
            UpperCAmelCase__ : Tuple = 0
            while i < len(_A ):
                try:
                    UpperCAmelCase__ : Optional[int] = word.index(_A , _A )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCAmelCase__ : Tuple = j
                if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCAmelCase__ : Optional[Any] = tuple(_A )
            UpperCAmelCase__ : List[Any] = new_word
            if len(_A ) == 1:
                break
            else:
                UpperCAmelCase__ : Union[str, Any] = get_pairs(_A )
        UpperCAmelCase__ : Optional[Any] = ''' '''.join(_A )
        UpperCAmelCase__ : List[Any] = word
        return word

    def lowercase_ ( self : str , _A : str ):
        """Split text with the GPT-2 regex, byte-encode each piece, then BPE it."""
        UpperCAmelCase__ : List[Any] = []
        for token in re.findall(self.pat , _A ):
            UpperCAmelCase__ : str = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(''' ''' ) )
        return bpe_tokens

    def lowercase_ ( self : List[str] , _A : Any ):
        """Token string -> vocabulary id (falls back to the unknown token's id)."""
        return self.encoder.get(_A , self.encoder.get(self.unk_token ) )

    def lowercase_ ( self : int , _A : List[str] ):
        """Vocabulary id -> token string."""
        return self.decoder.get(_A )

    def lowercase_ ( self : Tuple , _A : Any ):
        """Join tokens and reverse the byte-level encoding back to readable text."""
        UpperCAmelCase__ : Any = ''''''.join(_A )
        UpperCAmelCase__ : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text

    def lowercase_ ( self : int , _A : str , _A : Optional[str] = None ):
        """Write vocab.json and merges.txt into *save_directory*; return their paths."""
        if not os.path.isdir(_A ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase__ : Tuple = os.path.join(
            _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase__ : Any = os.path.join(
            _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(_A , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + '''\n''' )
        UpperCAmelCase__ : Union[str, Any] = 0
        with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ):
                # Gaps in the merge ranks mean the tokenizer files are inconsistent.
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    UpperCAmelCase__ : List[str] = token_index
                writer.write(''' '''.join(_A ) + '''\n''' )
                index += 1
        return vocab_file, merge_file

    def lowercase_ ( self : str , _A : List[int] , _A : Optional[List[int]] = None ):
        """Add special tokens: <s> A </s>, or <s> A </s></s> B </s> for pairs."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase__ : List[str] = [self.cls_token_id]
        UpperCAmelCase__ : Union[str, Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
        if token_ids_a is None:
            return [1] + ([0] * len(_A )) + [1]
        return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]

    def lowercase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ):
        """BART does not use token type ids; return all zeros of the right length."""
        UpperCAmelCase__ : List[str] = [self.sep_token_id]
        UpperCAmelCase__ : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowercase_ ( self : Optional[Any] , _A : Any , _A : Dict=False , **_A : Union[str, Any] ):
        """Optionally prepend a space so the first word is merged like a mid-sentence word."""
        UpperCAmelCase__ : Any = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
            UpperCAmelCase__ : Tuple = ''' ''' + text
        return (text, kwargs)
| 181 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
__a :Optional[Any] = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> remote config URL.
# NOTE(review): this rebinds ``__a`` and shadows the logger above; upstream
# these are two distinct names (``logger`` and the archive map) — confirm.
__a :Optional[Any] = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class _a ( snake_case_ ):
    """Configuration class for the ``data2vec-text`` model.

    Stores the transformer-encoder hyper-parameters; the defaults reproduce
    the ``facebook/data2vec-text-base`` architecture.
    """
    # Model-type tag (annotation dropped: ``List`` was not imported here).
    _lowerCamelCase = 'data2vec-text'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to the base class.

        Fix: the original repeated one parameter name for every argument (a
        SyntaxError) while the body read the canonical names, and it bound the
        values to a throwaway local instead of ``self`` attributes.  The
        canonical names are restored in the original positional order.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _a ( snake_case_ ):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def __A ( self : Dict ):
        """Return the ONNX input spec; axis 1 is 'choice' for multiple-choice tasks.

        Fix: the original bound the axis dict to a throwaway name and then
        read the unbound name ``dynamic_axis``.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import table: maps submodule name -> public names it defines.
__a :Union[str, Any] = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}
# NOTE(review): every assignment here rebinds ``__a``, yet ``_LazyModule`` at
# the bottom reads ``_import_structure`` which is never bound in this file;
# upstream the dict above is named ``_import_structure`` and the torch-only
# branch extends it in place rather than rebinding — confirm.
try:
    # Register the torch model classes only when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a :Optional[int] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports eagerly...
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    # ...while at runtime the module object is replaced by a lazy loader.
    import sys

    __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->int:
    """Return the number of set bits (1s) in the binary representation of *_lowercase*.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is negative.
    """
    # Fix: the original compared an unbound name ``a`` and used the nonsense
    # check ``isinstance(x, x)``.  The type check must come first so a
    # non-int input raises TypeError rather than failing the `< 0` compare.
    if not isinstance(_lowercase, int):
        raise TypeError("Input value must be a 'int' type")
    if _lowercase < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(_lowercase).count("1")
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 105 | import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A ( unittest.TestCase ):
    """Multi-GPU launcher tests for accelerate's bundled test scripts.

    NOTE(review): as written this class is broken obfuscated code — the first
    method binds locals named ``UpperCAmelCase__`` while later methods read
    ``self.test_file_path`` / ``self.operation_file_path`` /
    ``self.data_loop_file_path`` which are never assigned; all five methods
    share the single name ``lowercase_`` (only the last definition survives,
    so nothing would be collected); and ``mod_file`` / ``cmd`` /
    ``__UpperCAmelCase`` are read but never bound.  Upstream these are
    ``setUp`` plus four distinct ``test_*`` methods — confirm.
    """
    def lowercase_ (self : Union[str, Any] ) -> str:
        """Resolve the paths of the accelerate test scripts (upstream: setUp)."""
        UpperCAmelCase__ = inspect.getfile(accelerate.test_utils )
        UpperCAmelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        UpperCAmelCase__ = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
        UpperCAmelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )

    @require_multi_gpu
    def lowercase_ (self : List[str] ) -> Any:
        """Launch test_script.py under torchrun on every available GPU."""
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        UpperCAmelCase__ = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )

    @require_multi_gpu
    def lowercase_ (self : str ) -> str:
        """Launch the distributed-ops test script under torchrun."""
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        UpperCAmelCase__ = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )

    @require_multi_gpu
    def lowercase_ (self : Tuple ) -> int:
        """Re-launch this very file under torchrun (exercises the __main__ block below)."""
        UpperCAmelCase__ = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )

    @require_multi_gpu
    def lowercase_ (self : Dict ) -> str:
        """Launch the data-loop script restricted to two GPUs via CUDA_VISIBLE_DEVICES."""
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        UpperCAmelCase__ = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
            execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
    # NOTE(review): broken obfuscated script — every assignment rebinds
    # ``UpperCamelCase__`` while the following lines read ``accelerator``,
    # ``shape``, ``tensor``, ``tensora``, ``error_msg`` and ``index``, none of
    # which are ever bound here.  Upstream this block smoke-tests
    # ``Accelerator.pad_across_processes`` in both pad-last and pad-first
    # modes and aggregates all failures into one error message — confirm.
    UpperCamelCase__ = Accelerator()
    UpperCamelCase__ = (accelerator.state.process_index + 2, 1_0)
    UpperCamelCase__ = torch.randint(0, 1_0, shape).to(accelerator.device)
    UpperCamelCase__ = ''
    UpperCamelCase__ = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    UpperCamelCase__ = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    UpperCamelCase__ = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
from __future__ import annotations
from math import pi, sqrt
def _a ( inductance : float , capacitance : float ):
    """Compute the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C)).

    Returns a ``("Resonant frequency", frequency_in_hertz)`` tuple.

    Raises:
        ValueError: if inductance or capacitance is zero or negative.

    Fix: the original repeated one parameter name for both arguments (a
    SyntaxError) while the body read ``inductance`` / ``capacitance``.
    """
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''' )
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''' )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 353 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __magic_name__ ( TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """datasets formatter converting Arrow rows/columns/batches to torch tensors.

    NOTE(review): in this obfuscated copy all conversion helpers share the name
    ``UpperCAmelCase__`` (each ``def`` shadows the previous one) and the bodies
    read names such as ``column``, ``value``, ``data_struct`` and the helpers
    ``self._consolidate`` / ``self._tensorize`` / ``self._recursive_tensorize``
    / ``self.recursive_tensorize`` that are never bound under those names here.
    Upstream these are the distinct methods ``_consolidate``, ``_tensorize``,
    ``_recursive_tensorize``, ``recursive_tensorize``, ``format_row``,
    ``format_column`` and ``format_batch`` — confirm against the original
    formatting module.  Docstrings below describe the intended behaviour.
    """
    def __init__( self : Optional[int] , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : int ) -> List[str]:
        """Store torch.tensor(**kwargs) overrides and eagerly import torch."""
        super().__init__(features=lowerCamelCase__ )
        UpperCamelCase__ : Any = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) -> Optional[Any]:
        """Stack a non-empty list of same-shape/dtype tensors; otherwise return as-is."""
        import torch

        if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and column:
            if all(
                isinstance(lowerCamelCase__ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(lowerCamelCase__ )
        return column

    def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] ) -> int:
        """Convert one leaf value to a torch tensor (ints -> int64, floats -> float32)."""
        import torch

        # Strings/bytes/None pass through untouched; character arrays become lists.
        if isinstance(lowerCamelCase__ , (str, bytes, type(lowerCamelCase__ )) ):
            return value
        elif isinstance(lowerCamelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        UpperCamelCase__ : Tuple = {}
        if isinstance(lowerCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            UpperCamelCase__ : int = {'''dtype''': torch.intaa}
        elif isinstance(lowerCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            UpperCamelCase__ : Any = {'''dtype''': torch.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(lowerCamelCase__ , PIL.Image.Image ):
                UpperCamelCase__ : Optional[int] = np.asarray(lowerCamelCase__ )
        # Explicit user kwargs override the inferred default dtype.
        return torch.tensor(lowerCamelCase__ , **{**default_dtype, **self.torch_tensor_kwargs} )

    def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : Any ) -> Dict:
        """Recursively tensorize nested lists/tuples/object arrays."""
        import torch

        # support for torch, tf, jax etc.
        if hasattr(lowerCamelCase__ , '''__array__''' ) and not isinstance(lowerCamelCase__ , torch.Tensor ):
            UpperCamelCase__ : Optional[Any] = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(lowerCamelCase__ , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
        elif isinstance(lowerCamelCase__ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
        return self._tensorize(lowerCamelCase__ )

    def UpperCAmelCase__ ( self : int , lowerCamelCase__ : dict ) -> Optional[int]:
        """Entry point: map the recursive tensorizer over an arbitrary container."""
        return map_nested(self._recursive_tensorize , lowerCamelCase__ , map_list=lowerCamelCase__ )

    def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : pa.Table ) -> Mapping:
        """Format one Arrow row as a dict of torch tensors."""
        UpperCamelCase__ : int = self.numpy_arrow_extractor().extract_row(lowerCamelCase__ )
        UpperCamelCase__ : Optional[int] = self.python_features_decoder.decode_row(lowerCamelCase__ )
        return self.recursive_tensorize(lowerCamelCase__ )

    def UpperCAmelCase__ ( self : str , lowerCamelCase__ : pa.Table ) -> "torch.Tensor":
        """Format one Arrow column as a (possibly stacked) torch tensor."""
        UpperCamelCase__ : List[Any] = self.numpy_arrow_extractor().extract_column(lowerCamelCase__ )
        UpperCamelCase__ : str = self.python_features_decoder.decode_column(lowerCamelCase__ , pa_table.column_names[0] )
        UpperCamelCase__ : int = self.recursive_tensorize(lowerCamelCase__ )
        UpperCamelCase__ : Tuple = self._consolidate(lowerCamelCase__ )
        return column

    def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : pa.Table ) -> Mapping:
        """Format an Arrow batch as a dict of consolidated column tensors."""
        UpperCamelCase__ : Dict = self.numpy_arrow_extractor().extract_batch(lowerCamelCase__ )
        UpperCamelCase__ : Any = self.python_features_decoder.decode_batch(lowerCamelCase__ )
        UpperCamelCase__ : Tuple = self.recursive_tensorize(lowerCamelCase__ )
        for column_name in batch:
            UpperCamelCase__ : Optional[Any] = self._consolidate(batch[column_name] )
        return batch
| 51 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table: maps submodule name -> public names it defines.
lowercase__ :List[Any] = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
# NOTE(review): every assignment below rebinds ``lowercase__`` while
# ``_LazyModule`` at the bottom reads ``_import_structure``, which is never
# bound in this file; upstream the dict above is named ``_import_structure``
# and each optional branch extends it in place rather than rebinding — confirm.
try:
    # Fast tokenizer is only registered when the tokenizers package is present.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ :List[str] = ["FunnelTokenizerFast"]
try:
    # PyTorch model classes, only when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ :Tuple = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]
try:
    # TensorFlow model classes, only when TF is installed.
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ :Any = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    # At runtime the module object is replaced by a lazy loader.
    import sys

    lowercase__ :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowerCAmelCase_ ( lowerCamelCase_ ):
    """Output type of the text-to-video pipelines.

    NOTE(review): upstream this subclasses ``BaseOutput`` and the single field
    is named ``frames`` — here the base is the undefined name
    ``lowerCamelCase_`` and the field name was mangled; confirm and restore.
    """
    # Generated video frames: list of numpy arrays or a single torch tensor.
    lowerCAmelCase_ : Union[List[np.ndarray], torch.FloatTensor]
try:
    # The pipelines below require both transformers and torch; if either is
    # missing, fall back to the dummy placeholder objects.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 346 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI entry point: convert an original-format ControlNet checkpoint into
    # a diffusers ControlNet model directory.
    # Fix: the original bound every object to the throwaway name
    # ``lowerCamelCase`` and then read ``parser`` / ``args`` / ``controlnet``
    # unbound; the boolean parser was also defined under a different name
    # than the one referenced (``parse_bool``) and read an unbound ``string``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument(
        '''--original_config_file''',
        type=str,
        required=True,
        help='''The YAML config file corresponding to the original architecture.''',
    )
    parser.add_argument(
        '''--num_in_channels''',
        default=None,
        type=int,
        help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=512,
        type=int,
        help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
            ''' Base. Use 768 for Stable Diffusion v2.'''
        ),
    )
    parser.add_argument(
        '''--extract_ema''',
        action='''store_true''',
        help=(
            '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
            ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
            ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
        ),
    )
    parser.add_argument(
        '''--upcast_attention''',
        action='''store_true''',
        help=(
            '''Whether the attention computation should always be upcasted. This is necessary when running stable'''
            ''' diffusion 2.1.'''
        ),
    )
    parser.add_argument(
        '''--from_safetensors''',
        action='''store_true''',
        help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
    )
    parser.add_argument(
        '''--to_safetensors''',
        action='''store_true''',
        help='''Whether to store pipeline in safetensors format or not.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')

    def parse_bool(string: str) -> bool:
        """Strict CLI boolean converter: only the literal 'True'/'False' are accepted."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f'could not parse string as bool {string}')

    parser.add_argument(
        '''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
    )
    parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# NOTE(review): module-level flag; its purpose is not visible from this chunk
# alone (presumably a debug/skip toggle used by the test harness) — confirm.
lowerCamelCase : Optional[Any] =False
class __a ( unittest.TestCase ):
    # Intentionally empty: placeholder for the fast (non-nightly) test suite.
    pass
@nightly
@require_torch_gpu
class __a ( unittest.TestCase ):
    """Nightly integration tests for VersatileDiffusionPipeline (CUDA only).

    Fixes over the mangled original: all three methods were named
    ``__lowercase`` (shadowing each other, so unittest never ran them), local
    results were assigned to placeholder names while being read as
    ``pipe``/``generator``/``image``/..., and ``torch.floataa`` is not a real
    dtype (restored to ``torch.float16``).
    """

    def tearDown( self ):
        '''simple docstring'''
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load( self ):
        '''simple docstring'''
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )  # disable=None is the diffusers test convention — TODO confirm
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt="first prompt" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe.dual_guided(
            prompt="first prompt" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
        # Reloaded pipeline must reproduce the original forward pass exactly.
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image( self ):
        '''simple docstring'''
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        image = pipe.image_variation(init_image , generator=generator , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowercase ( DiffusionPipeline ):
    """Value-guided RL planning pipeline.

    Samples state-action trajectories with a diffusion model while nudging
    them with gradients from a learned value function (value guidance).

    Fixes over the mangled original: the base class placeholder is restored
    to the imported ``DiffusionPipeline``, duplicate ``_a`` parameter names
    (a SyntaxError) are replaced with real names, and all attribute / indexed
    assignments that were collapsed to placeholder locals are restored.
    """

    def __init__( self , value_function , unet , scheduler , env ) -> int:
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key dataset statistics used for (de-)normalization; keys whose
        # values do not support mean()/std() are skipped on purpose.
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except Exception:  # noqa: E722 — best-effort over heterogeneous keys
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except Exception:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize( self , x_in , key ):
        """Standardize ``x_in`` with the dataset statistics for ``key``."""
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize( self , x_in , key ):
        """Inverse of :meth:`normalize`."""
        return x_in * self.stds[key] + self.means[key]

    def to_torch( self , x_in ):
        """Recursively move dicts/tensors/arrays onto the UNet's device."""
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )

    def reset_xa( self , x_in , cond , act_dim ):
        """Overwrite the conditioned timesteps' state part of the trajectory."""
        for key, val in cond.items():
            # state lives after the first `act_dim` channels of each timestep
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        """Denoise ``x`` while applying value-function guidance.

        Returns the denoised trajectories and the last value estimates ``y``.
        """
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                # no guidance on the last noise levels — TODO confirm threshold
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y

    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        """Plan from observation ``obs`` and return the first denormalized action."""
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , """observations""" )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape , device=self.unet.device )
        x = self.reset_xa(x1 , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x, y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key="""actions""" )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 26 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    """Array-backed min-heap with a vertex→index map for decrease-key.

    Restored names: the driver below instantiates ``Heap()`` and calls
    ``get_position`` / ``set_position`` / ``heapify`` / ``bottom_to_top`` /
    ``delete_minimum``; the mangled definition gave every method the same
    placeholder name and dropped the assignment targets.
    """

    def __init__( self ):
        """node_position[vertex] == current index of that vertex in the heap."""
        self.node_position = []

    def lowerCamelCase_placeholder( self ):  # pragma: no cover
        pass

    def get_position( self , vertex ):
        """Return the heap index currently holding ``vertex``."""
        return self.node_position[vertex]

    def set_position( self , vertex , pos ):
        """Record that ``vertex`` now lives at heap index ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom( self , heap , start , size , positions ):
        """Sift the key at ``start`` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap keys and vertex ids, then fix the vertex→index map.
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_pos
            temp = self.get_position(positions[smallest_child] )
            self.set_position(
                positions[smallest_child] , self.get_position(positions[start] ) )
            self.set_position(positions[start] , temp )
            self.top_to_bottom(heap , smallest_child , size , positions )

    def bottom_to_top( self , val , index , heap , position ):
        """Sift ``val`` (written at ``index``) up to its place (decrease-key)."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            # reached the root without breaking: val belongs at index 0
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )

    def heapify( self , heap , positions ):
        """Build a min-heap in place over ``heap`` (keys) and ``positions``."""
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )

    def delete_minimum( self , heap , positions ):
        """Pop and return the vertex with the smallest key."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm(adjacency_list):
    """Prim's minimum-spanning-tree algorithm, rooted at vertex 0.

    Restored name: the ``__main__`` block calls ``prisms_algorithm``.

    Args:
        adjacency_list: ``adjacency_list[u]`` is a list of ``[v, weight]``
            pairs for every vertex ``u`` (dense, 0-indexed).

    Returns:
        The MST as a list of ``(parent_vertex, vertex)`` edges.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )

    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 148 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase_ = 2  # JSON indent for the dumped vocab/config files (original name: json_indent) — TODO confirm
class Dictionary:
    """Minimal fairseq-style symbol dictionary (symbol ↔ index mapping).

    Restored name: the conversion function below calls ``Dictionary.load``.
    The mangled original had duplicate keyword parameter names (a
    SyntaxError), identically-named methods shadowing each other, and lost
    every attribute assignment.
    """

    def __init__( self , *, # begin keyword-only arguments
                  bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        # special symbols always occupy the first indices
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )

    def __eq__( self , other ):
        return self.indices == other.indices

    def __getitem__( self , idx ):
        # out-of-range indices map to the unknown symbol
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word

    def __len__( self ):
        return len(self.symbols )

    def __contains__( self , sym ):
        return sym in self.indices

    @classmethod
    def load( cls , f ):
        """Build a dictionary from a ``dict.txt`` path or open file handle."""
        d = cls()
        d.add_from_file(f )
        return d

    def add_symbol( self , word , n=1 , overwrite=False ):
        """Add ``word`` with count ``n``; return its index (existing or new)."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx

    def _load_meta( self , lines ):
        # No metadata header in this format: token lines start at 0.
        return 0

    def add_from_file( self , f ):
        """Load entries from a "<token> <cnt> [flags]" file or file object."""
        if isinstance(f , str ):
            try:
                with open(f , """r""" , encoding="""utf-8""" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(f ) )
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(""" """ , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(""" """ , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        """Duplicate word found when loading Dictionary: '{}'. """
                        """Duplicate words can overwrite earlier ones by adding the """
                        """#fairseq:overwrite flag at the end of the corresponding row """
                        """in the dictionary file. If using the Camembert model, please """
                        """download an updated copy of the model file.""".format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def rewrite_dict_keys(d):
    """Rewrite fairseq BPE vocab keys to the HF convention.

    "word@@" (continuation piece) -> "word"; plain "word" (word end) ->
    "word</w>". Special tokens keep their original spelling.

    Restored: the mangled version lost the re-insertion of the special keys.
    """
    da = dict((re.sub(r"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , k ), v) for k, v in d.items() )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'''{k}</w>''']
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint directory to a HF transformers dir.

    Restored name/params: the ``__main__`` block calls this function, and the
    f-strings in the original error messages preserve the parameter names.
    The mangled version had duplicate parameter names (a SyntaxError) and
    dropped every assignment target, including the state-dict key renames.

    Args:
        biogpt_checkpoint_path: dir holding ``checkpoint.pt``, ``dict.txt``, ``bpecodes``.
        pytorch_dump_folder_path: output directory (created if missing).

    Raises:
        ValueError: when the checkpoint dir or a required file is missing.
    """
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f'''Writing results to {pytorch_dump_folder_path}''' )

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , """checkpoint.pt""" )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
    chkpt = torch.load(checkpoint_file , map_location="""cpu""" )
    args = chkpt["""cfg"""]["""model"""]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , """dict.txt""" )
    if not os.path.isfile(dict_file ):
        raise ValueError(f'''path to the file {dict_file} does not exist!''' )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["""vocab_file"""] )
    print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
    with open(src_vocab_file , """w""" , encoding="""utf-8""" ) as f:
        # lowerCamelCase_ is the module-level JSON indent constant (= 2)
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=lowerCamelCase_ ) )

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , """bpecodes""" )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["""merges_file"""] )
    shutil.copyfile(bpecodes_file , merges_file )

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , """config.json""" )
    model_conf = {
        """activation_dropout""": args["""activation_dropout"""],
        """architectures""": ["""BioGptForCausalLM"""],
        """attention_probs_dropout_prob""": args["""attention_dropout"""],
        """bos_token_id""": 0,
        """eos_token_id""": 2,
        """hidden_act""": args["""activation_fn"""],
        """hidden_dropout_prob""": args["""dropout"""],
        """hidden_size""": args["""decoder_embed_dim"""],
        """initializer_range""": 0.02,
        """intermediate_size""": args["""decoder_ffn_embed_dim"""],
        """layer_norm_eps""": 1e-12,
        """layerdrop""": args["""decoder_layerdrop"""],
        """max_position_embeddings""": args["""max_target_positions"""],
        """model_type""": """biogpt""",
        """num_attention_heads""": args["""decoder_attention_heads"""],
        """num_hidden_layers""": args["""decoder_layers"""],
        """pad_token_id""": 1,
        """scale_embedding""": not args["""no_scale_embedding"""],
        """tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
        """vocab_size""": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f'''Generating {biogpt_model_config_file}''' )
    with open(biogpt_model_config_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=lowerCamelCase_ ) )

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        """bos_token""": """<s>""",
        """eos_token""": """</s>""",
        """model_max_length""": 1024,
        """pad_token""": """<pad>""",
        """special_tokens_map_file""": None,
        """tokenizer_class""": """BioGptTokenizer""",
        """unk_token""": """<unk>""",
    }
    print(f'''Generating {biogpt_tokenizer_config_file}''' )
    with open(biogpt_tokenizer_config_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=lowerCamelCase_ ) )

    # model
    model_state_dict = chkpt["""model"""]
    # remove unneeded keys
    ignore_keys = [
        """decoder.version""",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith("""output_projection.weight""" ):
            # output projection is top-level in the HF model
            model_state_dict[layer_name.replace("""decoder.""" , """""" )] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace("""decoder""" , """biogpt""" )] = model_state_dict.pop(layer_name )

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict )

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f'''Generating {pytorch_weights_dump_path}''' )
    torch.save(model_state_dict , pytorch_weights_dump_path )
    print("""Conversion is done!""" )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase_ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path) | 34 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Restored names: the functions below read ARTICLES_REGEX and OPTS directly.
ARTICLES_REGEX = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
OPTS = None  # populated from parse_args() under __main__
def parse_args():
    """Build and parse the CLI for the SQuAD 2.0 evaluation script.

    Prints help and exits when invoked with no arguments. Restored name: the
    ``__main__`` block calls ``parse_args()``.
    """
    parser = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
    parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
    parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
    parser.add_argument(
        """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
    parser.add_argument(
        """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
    parser.add_argument(
        """--na-prob-thresh""" , """-t""" , type=float , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
    parser.add_argument(
        """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=None , help="""Save precision-recall curves to directory.""" )
    parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to whether it has at least one gold answer.

    Restored: the mangled version dropped the ``qid_to_has_ans[qa["id"]]``
    assignment target, so nothing was recorded.
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["""id"""]] = bool(qa["""answers"""]["""text"""] )
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text, drop punctuation/articles, and collapse whitespace.

    The article pattern is inlined (identical to the module-level
    ARTICLES_REGEX) so the function is self-contained; re.UNICODE is the
    default for str patterns in Python 3.
    """

    def remove_articles(text):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , text )

    def white_space_fix(text):
        return " ".join(text.split() )

    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens(s):
    """Tokenize a normalized answer string; empty/None input gives []."""
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized gold and predicted answers match, else 0."""
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa(a_gold, a_pred):
    """Token-level F1 between gold and predicted answers.

    Restored: the mangled version had duplicate parameter names and dropped
    the precision/recall assignment targets.
    """
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute per-question exact-match and F1 scores.

    Returns ``(exact_scores, fa_scores)`` dicts keyed by question id; each
    score is the max over all gold answers. Questions missing from ``preds``
    are skipped with a warning.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["""id"""]
                gold_answers = [t for t in qa["""answers"""]["""text"""] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""""""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Re-score answers, treating high no-answer probability as "no answer".

    When ``na_probs[qid] > na_prob_thresh`` the score becomes 1.0 if the
    question truly has no answer, else 0.0; otherwise the raw score is kept.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate exact/F1 scores into percentages.

    When ``qid_list`` is given, only those questions are averaged; otherwise
    all of ``exact_scores`` is used.
    """
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ("""exact""", 100.0 * sum(exact_scores.values() ) / total),
                ("""f1""", 100.0 * sum(fa_scores.values() ) / total),
                ("""total""", total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("""total""", total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of ``new_eval`` into ``main_eval`` under ``prefix_k``.

    Restored: the mangled version dropped the keyed assignment target.
    """
    for k in new_eval:
        main_eval["""%s_%s""" % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Render a precision/recall step curve to ``out_image`` via matplotlib.

    ``plt`` is imported at module scope only when --out-image-dir is set.
    """
    plt.step(recalls , precisions , color="""b""" , alpha=0.2 , where="""post""" )
    plt.fill_between(recalls , precisions , step="""post""" , alpha=0.2 , color="""b""" )
    plt.xlabel("""Recall""" )
    plt.ylabel("""Precision""" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer threshold and compute average precision (AP).

    Questions are visited in increasing no-answer probability; a PR point is
    emitted at every distinct probability value. Optionally plots the curve.

    Returns:
        ``{"ap": <average precision as a percentage>}``.
    """
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute AP for exact/F1/oracle scores, plot the curves, merge results."""
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
    # oracle: score 1 exactly on the answerable questions
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
    merge_eval(main_eval , pr_exact , """pr_exact""" )
    merge_eval(main_eval , pr_fa , """pr_f1""" )
    merge_eval(main_eval , pr_oracle , """pr_oracle""" )
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a normalized histogram of no-answer probabilities for ``qid_list``."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("""Model probability of no-answer""" )
    plt.ylabel("""Proportion of dataset""" )
    plt.title(f'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(image_dir , f'''na_prob_hist_{name}.png''' ) )
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold that maximizes the aggregate score.

    Returns ``(best_score_pct, best_thresh)`` where the score starts from
    "predict no-answer for everything" and improves as questions below the
    threshold are answered.
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                # answered an unanswerable question: lose the no-ans credit
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record best exact/F1 scores and their thresholds in ``main_eval``."""
    best_exact, exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_fa, fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
    main_eval["""best_exact"""] = best_exact
    main_eval["""best_exact_thresh"""] = exact_thresh
    main_eval["""best_f1"""] = best_fa
    main_eval["""best_f1_thresh"""] = fa_thresh
def main():
    """Run the full SQuAD 2.0 evaluation driven by the module-level OPTS."""
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
    dataset = dataset_json["""data"""]
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , """HasAns""" )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , """NoAns""" )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , """hasAns""" )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , """noAns""" )
    if OPTS.out_file:
        with open(OPTS.out_file , """w""" ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
lowerCamelCase_ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main() | 34 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # Fixed: the fallback was bound to a mangled name; the @skipIf guard below
    # reads is_torch_greater_or_equal_than_1_11.
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    """Holds image-processor hyperparameters shared by the tests below.

    Restored name: setUp below instantiates ``PixaStructImageProcessingTester``.
    The mangled __init__ had duplicate parameter names (a SyntaxError) and
    lost every attribute assignment.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"""height""": 20, """width""": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}

    def prepare_image_processor_dict(self):
        """Kwargs for constructing the image processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Download and return a small RGB test image (network access)."""
        img_url = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
        raw_image = Image.open(requests.get(img_url, stream=True ).raw ).convert('RGB' )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class lowerCAmelCase_ ( a_ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = PixaStructImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> Any:
UpperCamelCase : Dict = PixaStructImageProcessingTester(self )
@property
def snake_case_ ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_convert_rgb' ) )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : int = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase : Dict = 2048
UpperCamelCase : Any = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.06_06 ), atol=1e-3, rtol=1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester, equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, Image.Image )
# Test not batched input
UpperCamelCase : Optional[Any] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : Optional[Any] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
UpperCamelCase : Tuple = image_processor(
SCREAMING_SNAKE_CASE_, return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, Image.Image )
# Test not batched input
UpperCamelCase : List[str] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase : Any = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
UpperCamelCase : Union[str, Any] = """Hello"""
UpperCamelCase : Any = image_processor(
image_inputs[0], return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_, header_text=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
UpperCamelCase : Dict = image_processor(
SCREAMING_SNAKE_CASE_, return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_, header_text=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=SCREAMING_SNAKE_CASE_, numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, np.ndarray )
UpperCamelCase : Dict = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : List[Any] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
UpperCamelCase : Optional[int] = image_processor(
SCREAMING_SNAKE_CASE_, return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def snake_case_ ( self ) -> str:
UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=SCREAMING_SNAKE_CASE_, torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, torch.Tensor )
# Test not batched input
UpperCamelCase : str = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : Union[str, Any] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
UpperCamelCase : Dict = image_processor(
SCREAMING_SNAKE_CASE_, return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class lowerCAmelCase_ ( a_ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = PixaStructImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Dict = PixaStructImageProcessingTester(self, num_channels=4 )
UpperCamelCase : List[str] = 3
@property
def snake_case_ ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_normalize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_convert_rgb' ) )
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester, equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_, Image.Image )
# Test not batched input
UpperCamelCase : List[str] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase : Optional[int] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
UpperCamelCase : Union[str, Any] = image_processor(
SCREAMING_SNAKE_CASE_, return_tensors='pt', max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
| 119 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_A = 1_6
_A = 3_2
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase = 16 ) -> List[str]:
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase__ : List[str] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase__ : str = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase__ : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase__ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase__ : Any = 8
else:
lowerCAmelCase__ : Any = None
return tokenizer.pad(
__UpperCAmelCase , padding="""longest""" , max_length=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase__ : Any = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_A = mocked_dataloaders # noqa: F811
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCAmelCase ) == "1":
lowerCAmelCase__ : List[Any] = 2
# New Code #
lowerCAmelCase__ : Tuple = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowerCAmelCase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : Tuple = config["""lr"""]
lowerCAmelCase__ : int = int(config["""num_epochs"""] )
lowerCAmelCase__ : List[Any] = int(config["""seed"""] )
lowerCAmelCase__ : Tuple = int(config["""batch_size"""] )
lowerCAmelCase__ : Optional[int] = evaluate.load("""glue""" , """mrpc""" )
set_seed(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_dataloaders(__UpperCAmelCase , __UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCAmelCase )
# Instantiate scheduler
lowerCAmelCase__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Now we train the model
for epoch in range(__UpperCAmelCase ):
model.train()
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCAmelCase ):
lowerCAmelCase__ : str = model(**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = output.loss
accelerator.backward(__UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ : int = model(**__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__UpperCAmelCase , references=__UpperCAmelCase , )
lowerCAmelCase__ : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __UpperCAmelCase )
def lowercase_ ( ) -> Any:
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__UpperCAmelCase , default=__UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__UpperCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase__ : List[str] = parser.parse_args()
lowerCAmelCase__ : Any = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
main()
| 242 | 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    """Builds tiny BioGpt configs/inputs and runs per-model-class checks.

    NOTE(review): the original block was name-mangled — the class and every
    method were called ``__magic_name__`` / ``UpperCAmelCase__``, which broke
    the call sites below (``BioGptModelTester(self)``,
    ``self.model_tester.prepare_config_and_inputs()``, ``self.get_config()``,
    ...) and made later ``def``s shadow earlier ones.  Names and the collapsed
    tuple-unpack/attribute-store statements are restored to match the callers
    visible in this file and the standard transformers ``ModelTester`` pattern.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """Store the (deliberately small) hyper-parameters used for test models."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random input tensors shaped by the tester's hyper-parameters."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a small ``BioGptConfig`` from the stored hyper-parameters."""
        # NOTE(review): the mangled source hid the boolean passed to
        # ``is_decoder``; ``False`` follows the upstream tester — confirm.
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the bare model's output shape, with and without an attention mask."""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check the causal-LM head produces logits of shape (batch, seq, vocab)."""
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Verify a cached (past_key_values) step matches the full forward pass under masking."""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask: mask out the second half of the sequence
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids — it must not affect the output
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        """Verify multi-token continuation with past_key_values matches the uncached forward pass."""
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        """Run a causal-LM forward/backward pass, optionally with gradient checkpointing."""
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        """Check projection weights are initialised with the expected scaled std."""
        model = BioGptModel(config)
        # scaled init per GPT-2-style residual projections
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args
    ):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for BioGpt.

    NOTE(review): the original block was name-mangled — the class was
    ``__magic_name__`` with opaque ``__lowerCAmelCase`` bases and class
    attributes named ``A``, and most locals were collapsed into a single
    reused name, leaving several statements referencing undefined variables.
    Bases are restored from the imports at the top of this file; class
    attribute names follow the ``ModelTesterMixin`` convention — confirm
    against the mixin if it differs.
    """

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        """Create the shared model tester and config tester."""
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """Run the model check under each supported position-embedding type."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        """Left-padded batched generation must match per-sentence generation."""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        """Single-label sequence classification: check logits shape."""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        """Multi-label sequence classification: check logits shape with float targets."""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class __magic_name__(unittest.TestCase):
    """Slow integration tests for the `microsoft/biogpt` checkpoint.

    The original class defined both methods under the same mangled name (the
    second silently overrode the first) and asserted on variables that were
    never bound; both are restored here. Expected tensors/strings are kept
    byte-for-byte.
    """

    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 364 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __magic_name__(unittest.TestCase):
    """Holds the configuration kwargs used by the Donut image-processor tests.

    The original constructor bound every argument to a dead local, so the
    `self.*` attributes read by `prepare_image_processor_dict` were never
    set; they are assigned properly here. Mutable list defaults are replaced
    with the `None` sentinel idiom (same effective defaults).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Non-square default makes height/width mix-ups detectable in tests.
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict accepted by DonutImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class __magic_name__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests DonutImageProcessor configuration handling and `__call__` outputs.

    Reconstructed: the original's base class, class attribute, and every
    method name were mangled (all methods shared one name, so only the last
    survived, and none were discovered by unittest); locals read but never
    assigned are restored.
    """

    # `None` when vision deps are missing; the mixin skips accordingly.
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): DonutImageProcessingTester is the tester class defined
        # earlier in this file — confirm its name after de-mangling.
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        # Intentionally a no-op in the original suite (placeholder kept;
        # NOTE(review): original method name was mangled — confirm).
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 51 | 0 |
from __future__ import annotations
def A(_SCREAMING_SNAKE_CASE: list[list[int]]) -> int:
    """Return the minimal path sum from top-left to bottom-right of a grid,
    moving only right or down.

    The grid is updated in place (each cell ends up holding the cheapest cost
    of reaching it), so the bottom-right cell is the answer.

    Fixes the original, whose body read an undefined name `matrix` while the
    parameter was called `_SCREAMING_SNAKE_CASE`; also returns 0 for an empty
    grid instead of raising IndexError.
    """
    matrix = _SCREAMING_SNAKE_CASE  # keep the public parameter name stable for callers
    if not matrix or not matrix[0]:
        return 0
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
# Run this module's doctests when executed directly (no-op on import).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 48 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a :Optional[Any] = logging.get_logger(__name__)
class __a(OwlViTImageProcessor):
    """Deprecated alias of `OwlViTImageProcessor`, kept for backward compatibility.

    Fixes two defects in the original: the signature `*_a, **_a` reused one
    parameter name (a SyntaxError), and the positional-args tuple was passed
    to `warnings.warn` as the category instead of `FutureWarning`.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 132 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# The original bound all five values to the same repeatedly-overwritten name
# `_UpperCAmelCase`, leaving `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`
# (all read by the tokenizer class below) undefined. Distinct names restored.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from every byte value (0-255) to a unicode character.

    Printable/latin bytes map to themselves; the remaining bytes are shifted
    into the range starting at chr(256) so that no mapping target is a
    whitespace or control character the BPE would choke on.

    Restored from the mangled original, where every local was named
    `__lowerCAmelCase` (so `bs`/`cs` were undefined) and the wrong value was
    appended; the function is also renamed to `bytes_to_unicode`, the name the
    tokenizer class in this file actually calls.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (e.g. a tuple of strings, or a string of
    variable-length characters). Restored from the mangled original, where
    both locals shared one name and `pairs` was read before being defined;
    renamed to `get_pairs`, the name the tokenizer's `bpe` method calls.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class snake_case_(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED (GPT-2/BART style vocabulary).

    Reconstructed: the original declared several parameters per method with
    the same name `_snake_case` (a SyntaxError), gave most methods the same
    name so later ones silently overrode earlier ones, and referenced the
    undefined base class `__lowercase` (restored to `PreTrainedTokenizer`,
    imported at the top of this file). All runtime strings are preserved
    byte-for-byte.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single pre-tokenized token; cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Lowest-rank (earliest-learned) merge wins each round.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens and undo the byte->unicode mapping."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """<s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED does not use token types, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then extend `global_attention_mask` with -1 to match."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
from __future__ import annotations
import time
import numpy as np
# Example data for the Banker's algorithm below. The original bound all three
# tables to one repeatedly-overwritten name `_UpperCAmelCase`; distinct
# descriptive names are restored (roles inferred from the constructor's
# parameters: claim vector, allocation table, maximum-claim table).
claim_vector = [8, 5, 9, 7]
allocated_resources_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class snake_case_:
    """Banker's algorithm deadlock-avoidance simulator.

    Reconstructed: the original `__init__` bound its arguments to dead locals,
    so the private `self.__*` attributes read by every other method were never
    set, and `main` assigned to throwaway names while reading `need_list`,
    `available_resources`, etc. All printed strings are preserved.
    """

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Per-resource total currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need vector."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety check, printing each process as it executes.

        Pass any truthy keyword (e.g. `print_data=True`) to dump the tables
        first.
        """
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation/claim tables and current resource state."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
# Run this module's doctests when executed directly (no-op on import).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# The original bound both values to the same overwritten name `_UpperCamelCase`,
# leaving `logger` (read by the config class below) undefined.
logger = logging.get_logger(__name__)

# NOTE(review): archive-map name follows the upstream convention — confirm.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__ ( UpperCamelCase):
a_ = "deformable_detr"
a_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Union[str, Any] , _A : int=True , _A : Optional[Any]=None , _A : str=3 , _A : Dict=3_00 , _A : Dict=10_24 , _A : Any=6 , _A : List[Any]=10_24 , _A : Tuple=8 , _A : List[Any]=6 , _A : Dict=10_24 , _A : Any=8 , _A : Optional[Any]=0.0 , _A : List[Any]=True , _A : Any="relu" , _A : int=2_56 , _A : Optional[int]=0.1 , _A : List[str]=0.0 , _A : int=0.0 , _A : Tuple=0.02 , _A : Any=1.0 , _A : Any=True , _A : List[str]=False , _A : Optional[Any]="sine" , _A : List[str]="resnet50" , _A : str=True , _A : str=False , _A : Dict=4 , _A : List[Any]=4 , _A : Union[str, Any]=4 , _A : List[Any]=False , _A : Tuple=3_00 , _A : Any=False , _A : List[Any]=1 , _A : List[str]=5 , _A : Optional[int]=2 , _A : Optional[Any]=1 , _A : int=1 , _A : Dict=5 , _A : int=2 , _A : int=0.1 , _A : Tuple=0.25 , _A : int=False , **_A : List[str] , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCAmelCase_ : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_A , _A ):
UpperCAmelCase_ : Optional[Any] = backbone_config.get('''model_type''' )
UpperCAmelCase_ : Any = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Optional[Any] = config_class.from_dict(_A )
UpperCAmelCase_ : int = use_timm_backbone
UpperCAmelCase_ : List[Any] = backbone_config
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Union[str, Any] = num_queries
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : Optional[int] = d_model
UpperCAmelCase_ : Dict = encoder_ffn_dim
UpperCAmelCase_ : List[str] = encoder_layers
UpperCAmelCase_ : List[Any] = encoder_attention_heads
UpperCAmelCase_ : Optional[int] = decoder_ffn_dim
UpperCAmelCase_ : Union[str, Any] = decoder_layers
UpperCAmelCase_ : Tuple = decoder_attention_heads
UpperCAmelCase_ : Tuple = dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Union[str, Any] = activation_dropout
UpperCAmelCase_ : int = activation_function
UpperCAmelCase_ : List[str] = init_std
UpperCAmelCase_ : List[str] = init_xavier_std
UpperCAmelCase_ : List[Any] = encoder_layerdrop
UpperCAmelCase_ : Optional[int] = auxiliary_loss
UpperCAmelCase_ : Dict = position_embedding_type
UpperCAmelCase_ : List[Any] = backbone
UpperCAmelCase_ : Any = use_pretrained_backbone
UpperCAmelCase_ : List[str] = dilation
# deformable attributes
UpperCAmelCase_ : Dict = num_feature_levels
UpperCAmelCase_ : List[Any] = encoder_n_points
UpperCAmelCase_ : List[str] = decoder_n_points
UpperCAmelCase_ : Optional[Any] = two_stage
UpperCAmelCase_ : Dict = two_stage_num_proposals
UpperCAmelCase_ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
UpperCAmelCase_ : Any = class_cost
UpperCAmelCase_ : Optional[Any] = bbox_cost
UpperCAmelCase_ : List[str] = giou_cost
# Loss coefficients
UpperCAmelCase_ : Any = mask_loss_coefficient
UpperCAmelCase_ : Optional[Any] = dice_loss_coefficient
UpperCAmelCase_ : Dict = bbox_loss_coefficient
UpperCAmelCase_ : Tuple = giou_loss_coefficient
UpperCAmelCase_ : Dict = eos_coefficient
UpperCAmelCase_ : List[Any] = focal_alpha
UpperCAmelCase_ : Optional[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=_A , **_A )
@property
def A ( self : Any ) -> int:
return self.encoder_attention_heads
@property
def A ( self : Union[str, Any] ) -> int:
return self.d_model
def to_dict(self):
    """Serialize this configuration to a plain dict.

    Copies the instance ``__dict__``, replaces the nested ``backbone_config``
    with its own dict form when present, and records ``model_type`` so the
    config can be re-instantiated from the serialized payload.
    """
    # Original code assigned every result to a throwaway name and then
    # returned the undefined local ``output`` (NameError).
    output = copy.deepcopy(self.__dict__)
    if self.backbone_config is not None:
        output["backbone_config"] = self.backbone_config.to_dict()
    output["model_type"] = self.__class__.model_type
    return output
| 304 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
    # NOTE(review): this test class was mechanically renamed and is no longer
    # runnable as written:
    #   * all five class attributes are bound to the same name ``a_`` (each
    #     assignment shadows the previous), while the mixins expect names such
    #     as ``pipeline_class`` / ``params`` / ``batch_params``;
    #   * every method is named ``A``, so earlier definitions are shadowed and
    #     calls like ``self.get_dummy_components()`` cannot resolve;
    #   * several signatures declare the parameter ``_A`` twice (a SyntaxError),
    #     and ``_A`` also stands in where literal values (True/False, device,
    #     seed) belonged — the original values are not recoverable from this
    #     file alone;
    #   * locals are assigned to the throwaway name ``UpperCAmelCase_`` but read
    #     back under their original names (``unet``, ``mask``, ``inputs``, ...);
    #   * ``np.uinta`` / ``UNetaDConditionModel`` presumably were ``np.uint8`` /
    #     ``UNet2DConditionModel`` — TODO confirm against the upstream suite.
    # The comments below record the evident intent of each method.
    a_ = StableDiffusionDiffEditPipeline
    a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    a_ = frozenset(
        []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    a_ = frozenset([])

    def A ( self : Tuple ) -> Optional[Any]:
        # Intended: build tiny dummy components (UNet, DDIM forward/inverse
        # schedulers, VAE, CLIP text encoder/tokenizer) for fast CPU tests.
        torch.manual_seed(0 )
        UpperCAmelCase_ : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
        UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
        UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , )
        torch.manual_seed(0 )
        UpperCAmelCase_ : List[str] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        UpperCAmelCase_ : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
        UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A )
        UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        UpperCAmelCase_ : Optional[int] = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''inverse_scheduler''': inverse_scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def A ( self : str , _A : List[str] , _A : Any=0 ) -> str:
        # Intended: dummy call kwargs for the main pipeline __call__
        # (mask + pre-computed image latents).  NOTE(review): duplicate ``_A``
        # parameters — presumably (device, seed=0).
        UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase_ : Any = torch.manual_seed(_A )
        else:
            UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase_ : str = {
            '''prompt''': '''a dog and a newt''',
            '''mask_image''': mask,
            '''image_latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''inpaint_strength''': 1.0,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]:
        # Intended: dummy kwargs for ``pipe.generate_mask`` (source/target prompts).
        UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase_ : Dict = torch.manual_seed(_A )
        else:
            UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase_ : Optional[Any] = {
            '''image''': image,
            '''source_prompt''': '''a cat and a frog''',
            '''target_prompt''': '''a dog and a newt''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''num_maps_per_mask''': 2,
            '''mask_encode_strength''': 1.0,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any:
        # Intended: dummy kwargs for ``pipe.invert`` (DDIM/DPM inversion).
        UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A )
        else:
            UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase_ : Optional[int] = {
            '''image''': image,
            '''prompt''': '''a cat and a frog''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''inpaint_strength''': 1.0,
            '''guidance_scale''': 6.0,
            '''decode_latents''': True,
            '''output_type''': '''numpy''',
        }
        return inputs

    def A ( self : List[str] ) -> Optional[Any]:
        # Intended: save/load round-trip with all optional components set to
        # None; outputs before and after reload must match within 1e-4.
        if not hasattr(self.pipeline_class , '''_optional_components''' ):
            return
        UpperCAmelCase_ : str = self.get_dummy_components()
        UpperCAmelCase_ : Any = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(_A , _A , _A )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
        UpperCAmelCase_ : str = pipe(**_A )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_A )
            UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A )
            pipe_loaded.to(_A )
            pipe_loaded.set_progress_bar_config(disable=_A )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
        UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
        UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0]
        UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max()
        self.assertLess(_A , 1e-4 )

    def A ( self : Tuple ) -> int:
        # Intended: smoke-test ``generate_mask`` — shape (1, 16, 16) and an
        # all-zero corner slice on CPU.
        UpperCAmelCase_ : Optional[Any] = '''cpu'''
        UpperCAmelCase_ : Any = self.get_dummy_components()
        UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
        UpperCAmelCase_ : int = pipe.generate_mask(**_A )
        UpperCAmelCase_ : Tuple = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        UpperCAmelCase_ : List[Any] = np.array([0] * 9 )
        UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_A , 1e-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )

    def A ( self : str ) -> Optional[int]:
        # Intended: DDIM inversion — compare an output slice to recorded values.
        UpperCAmelCase_ : Union[str, Any] = '''cpu'''
        UpperCAmelCase_ : str = self.get_dummy_components()
        UpperCAmelCase_ : str = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
        UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
        UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        UpperCAmelCase_ : int = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_A , 1e-3 )

    def A ( self : Tuple ) -> Optional[Any]:
        # Intended: batched vs. single inference equivalence, loosened tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=5e-3 )

    def A ( self : str ) -> Tuple:
        # Intended: same inversion check using DPMSolver multistep
        # forward/inverse schedulers instead of DDIM.
        UpperCAmelCase_ : Any = '''cpu'''
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
        UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
        UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A )
        UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
        UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
        UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
        UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        UpperCAmelCase_ : List[Any] = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
        UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_A , 1e-3 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase):
    # NOTE(review): same mechanical renaming as the fast-test class above —
    # every method is named ``A`` (later definitions shadow earlier ones),
    # results are assigned to the throwaway name ``UpperCAmelCase_`` but read
    # back under their original names, and ``_A`` stands in for literal
    # arguments.  Not runnable as written.
    def A ( self : Optional[Any] ) -> Optional[int]:
        # Intended as tearDown: release GPU memory between slow tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def A ( cls : Dict ) -> List[Any]:
        # Intended as setUpClass: download the fruit-bowl test image once and
        # cache it on the class.  NOTE(review): the results never reach
        # ``cls.raw_image`` (assigned to a throwaway), yet later methods read
        # ``self.raw_image`` — confirm against the upstream test suite.
        UpperCAmelCase_ : Optional[int] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
        UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
        UpperCAmelCase_ : Any = raw_image

    def A ( self : List[Any] ) -> List[str]:
        # Intended: end-to-end DiffEdit (mask -> DDIM inversion -> inpaint)
        # against a reference "bowl of pears" image.
        UpperCAmelCase_ : int = torch.manual_seed(0 )
        UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
        UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
        UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
        UpperCAmelCase_ : Tuple = '''a bowl of pears'''
        UpperCAmelCase_ : Optional[int] = pipe.generate_mask(
            image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
        UpperCAmelCase_ : List[str] = pipe.invert(
            prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
        UpperCAmelCase_ : Any = pipe(
            prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
        UpperCAmelCase_ : str = (
            np.array(
                load_image(
                    '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                    '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
            / 2_55
        )
        assert np.abs((expected_image - image).max() ) < 5e-1

    def A ( self : Tuple ) -> List[str]:
        # Intended: same end-to-end run using DPMSolver multistep
        # forward/inverse schedulers with 25 inference steps.
        UpperCAmelCase_ : Dict = torch.manual_seed(0 )
        UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
        UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
        UpperCAmelCase_ : Dict = '''a bowl of pears'''
        UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask(
            image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
        UpperCAmelCase_ : List[Any] = pipe.invert(
            prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
        UpperCAmelCase_ : Dict = pipe(
            prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
        UpperCAmelCase_ : Tuple = (
            np.array(
                load_image(
                    '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                    '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
            / 2_55
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 304 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class __snake_case :
    """Min-heap of tentative distances with a vertex -> heap-index map.

    The heap itself (``heap``) and the index -> vertex map (``positions``) are
    passed to every method by the caller; only ``node_position`` (the inverse
    map) lives on the instance.  The original code assigned every result to a
    throwaway name, so no attribute was ever set and the swaps never happened;
    this version restores the intended assignments.
    """

    def __init__(self) -> None:
        # node_position[v] = current index of vertex v inside the heap.
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap index currently holding ``vertex``."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that ``vertex`` now lives at heap index ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at index ``start`` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return  # ``start`` is a leaf; nothing to sift.
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap keys and vertices, then fix the vertex -> index map.
            temp, temp_vertex = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_vertex
            temp_idx = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp_idx)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift the key ``val`` up from ``index`` toward the root."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                # Pull the parent down one level.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: ``val`` becomes the new minimum.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Establish the min-heap property bottom-up over ``heap``."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key."""
        temp = positions[0]
        heap[0] = sys.maxsize  # sentinel; sifts to the bottom
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def __a(adjacency_list):
    """Return the MST edges of a connected graph via Prim's algorithm.

    ``adjacency_list`` maps each vertex (0..n-1) to a list of
    ``[neighbor, weight]`` pairs.  The result is a list of ``(parent, vertex)``
    edges in the order vertices were added, starting from vertex 0.
    """
    heap = __snake_case()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1]  * len(adjacency_list)  # MST neighbour chosen for each vertex
    # Minimum distance of each heap slot's vertex to the partial tree.
    distance_tv = []
    positions = []  # positions[i] = vertex stored at heap index i
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read an undirected weighted edge list from stdin and print the MST.
    # (Original code bound every value to the throwaway name ``lowercase__``
    # and called the nonexistent ``prisms_algorithm``.)
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        # Each input line: "u v weight" — store the edge in both directions.
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(__a(adjacency_list))
| 351 | """simple docstring"""
import unittest
from knapsack import knapsack as k
class __snake_case(unittest.TestCase):
    """Unit tests for the project-local 0/1 ``knapsack`` module.

    The original methods were all named ``lowerCamelCase_`` — they shadowed one
    another and, not matching ``test_*``, were never discovered by unittest.
    Locals were also bound to throwaway names (``len(lowercase)`` referenced an
    undefined variable).  Restored to discoverable, runnable tests.
    """

    def test_base_case(self) -> None:
        """Zero capacity yields a best value of 0, regardless of items."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self) -> None:
        """Small instance: best is value 3 (weight 1) + value 2 (weight 2) = 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self) -> None:
        """Classic textbook instance: optimum is 100 + 120 = 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
    # Run the knapsack test suite when executed directly.
    unittest.main()
| 203 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """Write a small well-formed two-column CSV and return its path as str."""
    # (Restored name/parameter: pytest resolves fixtures by function and
    # parameter name, and the original def shadowed its siblings.)
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """Write a CSV whose last row has a trailing extra column (parse error)."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """Write a one-column CSV whose single row is the path of ``image_file``."""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """Write a one-column CSV of class-label strings ("good"/"bad")."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """Write a one-column CSV whose cells are space-separated integer lists."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """A malformed row must abort generation and log which file failed."""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    # Upstream raises ValueError here ("Error tokenizing data") — the exception
    # class had been obfuscated away in this file.
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """An ``Image()`` feature decodes a path column into {path, bytes} dicts."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]  # second line is the image path
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """A ``ClassLabel`` feature maps label strings to their integer ids."""
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]  # skip the header row
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    # Fixed: the garbled ``.straint`` call is ClassLabel.str2int.
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """A per-column converter turns space-separated cells into int lists."""
    # Fixed: the converter lambda referenced an undefined ``x`` and cast the
    # whole cell instead of each token.
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 8 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel encoding: the (not yet quantized) latent tensor."""

    # Encoded latents of shape (batch, latent_channels, h, w) — produced by
    # the encoder + quant_conv; quantization happens later in decode().
    latents: torch.FloatTensor
class UpperCAmelCase_(ModelMixin, ConfigMixin):
    """VQ-VAE model: Encoder -> vector-quantized latent space -> Decoder.

    The original text declared every constructor parameter as ``A`` (a
    SyntaxError) and bound every submodule to a throwaway name, so
    ``self.encoder`` etc. never existed; names restored per the evident
    call sites (``self.quantize``, ``self.config.norm_type``, ...).
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode images to (unquantized) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize (unless forced off) and decode latents back to images."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # Spatial norm decoders additionally condition on the quantized latents.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencoding round-trip: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 202 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# Tokenizer resource maps.  The four constants below were all bound to the
# same throwaway name ``_lowerCAmelCase`` (each shadowing the previous) while
# the tokenizer class reads them under these canonical names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

# Per-checkpoint tokenizer init defaults.
PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}
class UpperCAmelCase_(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) WordPiece tokenizer for ELECTRA checkpoints.

    The original text bound every class attribute to ``__SCREAMING_SNAKE_CASE``
    and every method to ``snake_case_`` (each shadowing the previous), so the
    base-class machinery could not find any of them; canonical names restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer if the saved tokenizer.json disagrees
        # with the requested casing/accent/CJK options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with [CLS]/[SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for the first sequence (+specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model's vocabulary files; return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 202 | 1 |
"""simple docstring"""
def lowerCamelCase_(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative ints as a "0b"-prefixed binary string.

    Raises:
        ValueError: if either input is negative.
    """
    # (Original signature declared the same parameter name twice — a
    # SyntaxError — while the body already read ``a`` and ``b``.)
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    # OR each aligned bit pair: the result bit is 1 if either char is "1".
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 263 |
"""simple docstring"""
def lowerCamelCase_ (number: int) -> int:
    """Count the set bits (Hamming weight) of a non-negative integer.

    Uses Brian Kernighan's trick: ``n &= n - 1`` clears the lowest set bit,
    so the loop runs once per 1-bit instead of once per bit position.
    (The original checked ``isinstance(x, x)`` and read undefined names.)

    Raises:
        ValueError: if `number` is not an int or is negative.
    """
    if not isinstance(number, int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; the loop runs only as many
        # times as there are `1` bits.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 263 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ ( unittest.TestCase ):
    """Holds image-processor hyper-parameters for the processor tests below.

    NOTE(review): the original ``__init__`` named every parameter ``A_`` (a
    SyntaxError); the parameter names are reconstructed from the attributes
    the body assigns and reads.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # noqa: B006 — kept to preserve the original defaults
        image_std=[0.5, 0.5, 0.5],  # noqa: B006
    ):
        """Store the settings; `size` defaults to an 18x18 target resolution."""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


# The test class below instantiates this helper under this name.
EfficientFormerImageProcessorTester = A__
@require_torch
@require_vision
class A__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests the ViT image processor on PIL, NumPy and PyTorch inputs.

    NOTE(review): in the original every method was named ``__UpperCamelCase``
    (so later defs shadowed earlier ones), the base class name and the
    ``image_processing_class`` attribute were lost, and the boolean flags
    passed to ``prepare_image_inputs`` were replaced by the bare name ``A_``.
    Names and flags are reconstructed from the attribute reads in the bodies
    and the standard HF test layout — confirm against upstream.
    """

    # Read throughout the tests as `self.image_processing_class`.
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        # The tester class above is expected to be importable under this name.
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        # NOTE(review): equal_resolution reconstructed as False — confirm.
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        # NOTE(review): numpify reconstructed as True — confirm.
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        # NOTE(review): torchify reconstructed as True — confirm.
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 140 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Modules whose installed versions are validated every time the package is
# imported (typically those pinned in setup.py's install_requires).
# (The `Optional[int]` annotation was dropped: `Optional` is not imported at
# module scope, so evaluating it would raise a NameError.)
__lowerCamelCase = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
# The loop below iterates under this name; keep both bindings so existing
# references to either keep working.
pkgs_to_check_at_runtime = __lowerCamelCase

if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("""importlib_metadata""")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def A_ ( pkg , hint=None ):
    """Check that the installed version of `pkg` satisfies its pinned requirement.

    `hint` is an optional message appended to the error raised on mismatch.
    (The original declared two parameters with the same name — a SyntaxError —
    and carried an `Optional[Any]` annotation whose names are not imported.)
    """
    require_version(deps[pkg] , hint )
| 140 | 1 |
"""simple docstring"""
def A__ ( sentence ):
    """Reverse every word of more than four characters, keeping word order.

    >>> A__('Hey wollef sroirraw')
    'Hey fellow warriors'
    """
    # Original compared len() of the whole sentence and read an undefined
    # `sentence` name; the per-word length is what selects reversal.
    return " ".join(word[::-1] if len(word ) > 4 else word for word in sentence.split() )


# Name used by the demo below (and the likely intended public name).
reverse_long_words = A__


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
| 292 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
# Preserve the original module-level binding (the class body reads `logger`).
_snake_case = logger


class _UpperCAmelCase ( DiffusionPipeline ):
    """Generate images from spoken audio.

    The audio is transcribed with a Whisper model and the resulting text is
    used as the prompt of a Stable-Diffusion-style latent diffusion loop.

    NOTE(review): the original declared every parameter as `__UpperCamelCase`
    (a SyntaxError) and bound every local to `A`; names are reconstructed
    from the keyword arguments of `register_modules` and from the names the
    body actually reads.
    """

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in `slice_size` chunks to save memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off
            slice_size = self.unet.config.attention_head_dim // 2
        # Original passed the raw parameter here, so "auto" was forwarded
        # verbatim instead of the computed slice size.
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Go back to computing attention in one step."""
        self.enable_attention_slicing(None)

    # The original bound both methods to the single name `lowerCamelCase`;
    # the last binding (the disabling one) is kept for backward compatibility.
    lowerCamelCase = disable_attention_slicing

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate: int = 16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Transcribe `audio`, then run the diffusion loop on the transcript."""
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        # NOTE(review): the boolean flags were lost in the original; True is
        # the conventional choice for both — confirm against upstream.
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}." )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}." )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`." )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        # NOTE(review): safety checking is not run here, so no NSFW flags are
        # produced — confirm `None` matches upstream.
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 292 | 1 |
import math
class a_ :
    """Dense weighted directed graph with all-pairs shortest paths.

    ``self.dp[i][j]`` holds the weight of edge i->j (``math.inf`` when the
    edge is absent) and, after :meth:`floyd_warshall`, the length of the
    shortest i->j path.  (The original duplicated parameter names — a
    SyntaxError — and read undefined locals; reconstructed from the names
    the bodies reference.)
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]

    def add_edge(self, u, v, w):
        """Insert (or overwrite) the directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through every intermediate node k — O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the (post-floyd_warshall) minimum distance from u to v."""
        return self.dp[u][v]

    # The original bound every method to the single name `a__`; the last
    # binding (the distance query) is kept for backward compatibility.
    a__ = show_min
if __name__ == "__main__":
    # Build the 5-node demo graph, run Floyd-Warshall and query two distances.
    # (The original instantiated an undefined `Graph` name and bound the
    # instance to `A_` while calling methods on an undefined `graph`.)
    graph = a_(5)
    # Preserve the original module-level binding.
    A_ = graph
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 361 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default values for (almost) every `PretrainedConfig` kwarg.  The test
# below cross-checks this mapping against `PretrainedConfig`'s defaults, so
# every value here must differ from the default.
# (The `: Any` annotation was dropped: `Any` is not imported at module scope,
# and the tests read this mapping as `config_common_kwargs`.)
A_ = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 1_28,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
# The tests below read the mapping under this name; keep both bindings.
config_common_kwargs = A_
@is_staging_test
class a_ ( unittest.TestCase ):
    """Hub round-trip tests: push configs to the Hub and re-load them.

    NOTE(review): every method below is named ``a__`` (later defs shadow the
    earlier ones), and several locals are bound to ``lowerCamelCase__`` but
    read back as ``lowerCamelCase_`` or as meaningful names (``config``,
    ``new_config``) — these are unresolved scrambling artifacts, left
    untouched here and flagged for repair.
    """
    @classmethod
    def a__ (cls ):
        """setUpClass: store the test token so Hub calls are authenticated."""
        lowerCamelCase__ : Tuple = TOKEN
        HfFolder.save_token(lowerCamelCase_ )
    @classmethod
    def a__ (cls ):
        """tearDownClass: best-effort cleanup of the repos created by the tests."""
        try:
            delete_repo(token=cls._token, repo_id='test-config' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-config' )
        except HTTPError:
            pass
    def a__ (self ):
        """Push a config to a user namespace (directly and via save_pretrained) and re-load it."""
        lowerCamelCase__ : Optional[int] = BertConfig(
            vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
        config.push_to_hub('test-config', use_auth_token=self._token )
        lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
        # Reset repo
        delete_repo(token=self._token, repo_id='test-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
            lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
    def a__ (self ):
        """Same round-trip as above but into an organization namespace."""
        lowerCamelCase__ : List[Any] = BertConfig(
            vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
        lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
            lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
    def a__ (self ):
        """Push a custom (dynamic) config class and re-load it with trust_remote_code."""
        CustomConfig.register_for_auto_class()
        lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
        config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
        lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
        self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
    """Offline-ish behaviour tests for `PretrainedConfig` loading/serialization.

    NOTE(review): every method is named ``a__`` (later defs shadow earlier
    ones) and locals are bound to ``lowerCamelCase__`` but read back under
    other names (``c``, ``base_config``, ``configuration`` …) — unresolved
    scrambling artifacts, left untouched here and flagged for repair.
    """
    def a__ (self ):
        """update_from_string must update int/float/bool/str config records."""
        lowerCamelCase__ : Tuple = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
        lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
        lowerCamelCase__ : str = not c.scale_attn_weights # bool
        lowerCamelCase__ : Any = c.summary_type + 'foo' # str
        c.update_from_string(
            f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
        self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
        self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
        self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
    def a__ (self ):
        """config_common_kwargs must cover every PretrainedConfig kwarg with a non-default value."""
        lowerCamelCase__ : Any = PretrainedConfig()
        lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
        lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
        if len(lowerCamelCase_ ) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                f''' {', '.join(lowerCamelCase_ )}.''' )
    def a__ (self ):
        """Loading from a subfolder must require the `subfolder` argument."""
        with self.assertRaises(lowerCamelCase_ ):
            # config is in subfolder, the following should not work without specifying the subfolder
            lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
        lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
        self.assertIsNotNone(lowerCamelCase_ )
    def a__ (self ):
        """A cached config must be served even when the network returns HTTP 500."""
        lowerCamelCase__ : int = mock.Mock()
        lowerCamelCase__ : str = 5_0_0
        lowerCamelCase__ : Union[str, Any] = {}
        lowerCamelCase__ : Any = HTTPError
        lowerCamelCase__ : str = {}
        # Download this model to make sure it's in the cache.
        lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
            lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def a__ (self ):
        """Legacy path: loading a config directly from a resolved URL must work."""
        lowerCamelCase__ : int = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
    def a__ (self ):
        """Versioned configuration files: pick the file matching the running transformers version."""
        lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
        lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(lowerCamelCase_ )
            lowerCamelCase__ : Tuple = 2
            json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
            self.assertEqual(new_configuration.hidden_size, 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
            lowerCamelCase__ : List[Any] = 7_6_8
            configuration.save_pretrained(lowerCamelCase_ )
            shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
            lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
            self.assertEqual(new_configuration.hidden_size, 7_6_8 )
    def a__ (self ):
        """A repo with two config versions must resolve per (monkey-patched) transformers version."""
        lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        lowerCamelCase__ : Dict = 'v4.0.0'
        lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
            lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
        self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        lowerCamelCase__ : Optional[Any] = 'v3.0.0'
        lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
        self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 316 | 0 |
from __future__ import annotations
from typing import TypedDict
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : str
UpperCamelCase : int
def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of `s`, including `s` itself.

    (Renamed from the thrice-reused ``_SCREAMING_SNAKE_CASE`` to the name
    `bwt_transform` actually calls; the old binding is kept as an alias.)

    Raises:
        TypeError: if `s` is not a string.
    """
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]


_SCREAMING_SNAKE_CASE = all_rotations
def bwt_transform(s: str) -> BWTTransformDict:
    """Apply the Burrows-Wheeler transform to `s`.

    Returns a dict with the transformed string (last column of the sorted
    rotation matrix) and the row index of the original string, which is
    needed to invert the transform.  Rotations are built inline so the
    function does not depend on a sibling helper.

    Raises:
        TypeError: if `s` is not a string.
        ValueError: if `s` is empty.
    """
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')
    rotations = [s[i:] + s[:i] for i in range(len(s))]
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        'bwt_string': ''.join([word[-1] for word in rotations]),
        'idx_original_string': rotations.index(s),
    }
    return response


_SCREAMING_SNAKE_CASE = bwt_transform
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform.

    Repeatedly prepends the BWT column to the (re-sorted) table; after
    ``len(bwt_string)`` rounds, row `idx_original_string` is the original.

    Raises:
        TypeError: if `bwt_string` is not a string, or the index cannot be
            cast to int.
        ValueError: if `bwt_string` is empty or the index is out of range.
    """
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or passive'
            ' of cast to int.')
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than' ' len(bwt_string).')

    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


_SCREAMING_SNAKE_CASE = reverse_bwt
if __name__ == "__main__":
    # Interactive demo: read a string, show its BWT, then invert it again.
    # (The original never bound `entry_msg`, `s`, `result` or
    # `original_string`, which the f-strings below read.)
    entry_msg = '''Provide a string that I will generate its BWT transform: '''
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        F"""Burrows Wheeler transform for string \'{s}\' results """
        F"""in \'{result["bwt_string"]}\'"""
    )
    original_string = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
    print(
        F"""Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' """
        F"""we get original string \'{original_string}\'"""
    )
| 280 |
from __future__ import annotations
def lowerCAmelCase__ ( graph: dict , start: str ) -> set[str]:
    """Iterative depth-first search over `graph` from `start`.

    Returns the set of every reachable vertex.  Differences from BFS:
    1) pop the last element instead of the first one
    2) add adjacent elements to the stack without exploring them
    (The original duplicated the parameter name — a SyntaxError — and never
    bound `stack`.)
    """
    # NOTE: `set(start)` seeds the set with the characters of `start`,
    # mirroring the reference implementation (fine for single-char labels).
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # push neighbours in reverse so they are visited in list order
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


# Name used by the demo at the bottom of the file.
depth_first_search = lowerCAmelCase__
# Demo adjacency lists; neighbours are explored left-to-right thanks to the
# reversed() push order inside the search.  (The original bound the dict to
# `lowerCAmelCase__` — shadowing the search function — while the demo call
# read an undefined `G`; the `:Tuple` annotation also used an unimported
# name.)
G = {
    '''A''': ['''B''', '''C''', '''D'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F'''],
    '''D''': ['''B''', '''D'''],
    '''E''': ['''B''', '''F'''],
    '''F''': ['''C''', '''E''', '''G'''],
    '''G''': ['''F'''],
}
# Preserve the original module-level binding.
lowerCAmelCase__ = G

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, '''A'''))
| 329 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 326 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class _a ( logging.LoggerAdapter):
    """Logger adapter that, by default, only emits on the main process.

    (The original bound both methods to one name with duplicated parameters —
    a SyntaxError; names are reconstructed from the call at `self._should_log`
    and the LoggerAdapter.log signature.)
    """

    @staticmethod
    def _should_log(main_process_only):
        """Return True when this process is allowed to emit the record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate to the wrapped logger, filtered/ordered across processes.

        `main_process_only` restricts output to the main process; `in_order`
        instead emits from every process, one at a time.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
        # NOTE(review): the pop() defaults were lost in the scramble; True /
        # False match the upstream accelerate behaviour — confirm.
        main_process_only = kwargs.pop('''main_process_only''' , True )
        in_order = kwargs.pop('''in_order''' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def lowercase(name, log_level=None):
    """Return a `_a` (multi-process-aware) logger adapter for ``name``.

    :param name: logger name (usually ``__name__``)
    :param log_level: explicit level; falls back to the ACCELERATE_LOG_LEVEL
        environment variable when None.

    The original signature reused one parameter name twice (a SyntaxError) and
    returned undefined ``MultiProcessAdapter``; the adapter class in this file
    is named ``_a``.
    """
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return _a(logger, {})
| 326 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy import structure (standard transformers pattern): maps submodule name to
# the public symbols it exports, consumed by `_LazyModule` below. The original
# bound this dict to a throwaway name, then *rebound* that name to a plain list
# (destroying the config entry) and finally passed undefined `_import_structure`.
_import_structure = {
    'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: modeling symbols are simply not registered.
    pass
else:
    _import_structure['modeling_ernie'] = [
        'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ErnieForCausalLM',
        'ErnieForMaskedLM',
        'ErnieForMultipleChoice',
        'ErnieForNextSentencePrediction',
        'ErnieForPreTraining',
        'ErnieForQuestionAnswering',
        'ErnieForSequenceClassification',
        'ErnieForTokenClassification',
        'ErnieModel',
        'ErniePreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure (standard transformers pattern). The original rebound
# the structure name to plain lists per optional dependency (destroying it) and
# passed undefined `_import_structure` to `_LazyModule`.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static analysis sees the eager imports.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module in place of this one.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure (standard transformers pattern). The original rebound
# the structure name to plain lists (destroying the config entry) and passed
# undefined `_import_structure` to `_LazyModule`.
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static analysis sees the eager imports.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # Install the lazy module in place of this one.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 358 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase_ = _symbol_database.Default()
lowerCAmelCase_ = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# 
\x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 
\x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
# NOTE(review): mechanical renaming replaced the bindings here — `_globals` and
# `DESCRIPTOR` used below were presumably the original names for these
# throwaway `lowerCAmelCase_` assignments; confirm against the generated
# sentencepiece_model_pb2.py before relying on this module.
lowerCAmelCase_ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
# When the pure-Python descriptor implementation is active, serialized option
# bytes and message offsets are patched in by hand (protoc-generated pattern).
if _descriptor._USE_C_DESCRIPTORS is False:
    lowerCAmelCase_ = None
    lowerCAmelCase_ = B'H\003'
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    # NOTE(review): the values below look like `_serialized_start`/`_serialized_end`
    # offsets for the generated message types, but the attribute targets were lost
    # in renaming — TODO restore from a clean protoc run.
    lowerCAmelCase_ = 45
    lowerCAmelCase_ = 1_581
    lowerCAmelCase_ = 1_517
    lowerCAmelCase_ = 1_570
    lowerCAmelCase_ = 1_584
    lowerCAmelCase_ = 1_793
    lowerCAmelCase_ = 1_795
    lowerCAmelCase_ = 1_916
    lowerCAmelCase_ = 1_864
    lowerCAmelCase_ = 1_905
    lowerCAmelCase_ = 1_919
    lowerCAmelCase_ = 2_429
    lowerCAmelCase_ = 2_208
    lowerCAmelCase_ = 2_418
    lowerCAmelCase_ = 2_323
    lowerCAmelCase_ = 2_407
# @@protoc_insertion_point(module_scope)
| 302 | 0 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (The original assigned the path to a throwaway name, leaving `git_repo_path`
# undefined on the next line.)
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCAmelCase(parser):
    """pytest_addoption hook body: register diffusers' shared CLI options.

    The original return annotation used `Union`, which is not imported here and
    made the module crash at import time; dropped.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def UpperCAmelCase(terminalreporter):
    """pytest_terminal_summary hook body: write per-test reports when --make-reports is set.

    The original parameter was renamed away from `terminalreporter` (leaving the
    body's reference undefined) and passed the wrong value as the report id.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 69 |
from __future__ import annotations
def snake_case_(nums, target) -> list[int]:
    """Two-pointer two-sum on a sorted list.

    :param nums: list of ints sorted in ascending order
    :param target: desired pair sum
    :return: [i, j] indices with nums[i] + nums[j] == target, else []

    The original reused one parameter name twice (a SyntaxError) and assigned
    the pointer updates to a throwaway name, so `i`/`j` never moved.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        pair = nums[i] + nums[j]
        if pair == target:
            return [i, j]
        if pair < target:
            i += 1  # sum too small: advance the low pointer
        else:
            j -= 1  # sum too large: retreat the high pointer
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 196 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase :Dict = logging.get_logger(__name__)
class _lowerCamelCase(lowercase__):
    """Deprecated feature-extractor shim: warns, then behaves as the image processor base."""

    def __init__(self, *args, **kwargs) -> None:
        # The original passed the first positional argument as the warning
        # *category*; the standard deprecation pattern uses FutureWarning.
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (The original assigned the path to a throwaway name with an undefined
# `Optional` annotation, leaving `git_repo_path` unbound on the next line.)
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def lowerCamelCase(config):
    """pytest_configure hook body: register transformers' custom markers.

    The original parameter was renamed away from `config` (leaving every call
    below undefined) and annotated with unimported `Tuple`, which crashed at
    import time.
    """
    config.addinivalue_line(
        'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested')
    config.addinivalue_line(
        'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested')
    config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule')
def lowerCamelCase(lowerCAmelCase):
    """pytest_addoption hook body: delegate option registration to transformers.

    The original annotation used unimported `Any`, which crashed the module at
    import time; dropped.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(lowerCAmelCase)
def lowerCamelCase(terminalreporter):
    """pytest_terminal_summary hook body: write per-test reports when --make-reports is set.

    The original parameter was renamed away from `terminalreporter` and the
    getoption result bound to a throwaway name, leaving both references below
    undefined.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def lowerCamelCase(session, exitstatus):
    """pytest_sessionfinish hook body: treat exit code 5 (no tests collected) as success.

    The original reused one parameter name twice (a SyntaxError) and assigned 0
    to a throwaway name instead of `session.exitstatus`.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output. The original bound both values to a
# throwaway name, leaving `IGNORE_RESULT` and `OutputChecker` (used by the
# checker class below) undefined.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')

OutputChecker = doctest.OutputChecker
class _lowerCamelCase(doctest.OutputChecker):
    """OutputChecker that treats examples flagged with IGNORE_RESULT as always passing.

    The original subclassed undefined `lowercase__` and named the override
    `__lowerCAmelCase`, so doctest never called it; the method must be
    `check_output` to override the base class hook.
    """

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return doctest.OutputChecker.check_output(self, want, got, optionflags)
# Install the custom doctest machinery globally (standard transformers conftest
# pattern). The original assigned to throwaway names — the point of these lines
# is to monkeypatch doctest/_pytest — and referenced undefined
# `CustomOutputChecker`; the checker class in this file is `_lowerCamelCase`.
doctest.OutputChecker = _lowerCamelCase
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
A ='0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Random int32 tensor of token ids in [0, vocab_size), with the given shape.

    Renamed from the mechanically-obfuscated `snake_case_` (which also reused
    one parameter name three times — a SyntaxError) so that the call in the
    attention-mask helper below resolves; `jnp.intaa` was a garbled
    `jnp.int32`.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return np.array(values, dtype=jnp.int32).reshape(shape)


def snake_case_(shape, rng=None):
    """Random 0/1 attention mask of `shape` (assumed 2-D: batch x seq).

    The last position of every row is forced to 1 so each batch attends to at
    least one token (reconstructed from the original comment — the assignment
    had been destroyed by renaming).
    """
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class _a :
    """Mixin exercising `generate` across every class in `all_generative_model_classes`.

    NOTE(review): mechanical renaming collapsed every method name to `A` (only
    the last definition survives on the class) and left `lowercase` — used
    below as the config / model-input / expected-length variable — undefined,
    so this mixin cannot run as-is; the original identifiers need restoring.
    """
    # presumably the model tester fixture and the tuple of Flax model classes — TODO confirm
    __a : Dict = None
    __a : List[Any] = ()
    def A ( self : int ):
        '''Build (config, input_ids, attention_mask, max_length) for the generation tests.'''
        UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        UpperCAmelCase = 2
        UpperCAmelCase = inputs['''input_ids'''].shape[-1] // 2
        UpperCAmelCase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        UpperCAmelCase = jnp.ones_like(lowercase )
        UpperCAmelCase = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        UpperCAmelCase = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            UpperCAmelCase = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def A ( self : Dict ):
        '''Greedy Flax generation matches the equivalent PyTorch model's output.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        UpperCAmelCase = False
        UpperCAmelCase = max_length
        UpperCAmelCase = 0
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            UpperCAmelCase = getattr(lowercase , lowercase )
            UpperCAmelCase = pt_model_class(lowercase ).eval()
            UpperCAmelCase = load_flax_weights_in_pytorch_model(lowercase , flax_model.params )
            UpperCAmelCase = flax_model.generate(lowercase ).sequences
            UpperCAmelCase = pt_model.generate(torch.tensor(lowercase , dtype=torch.long ) )
            # PT pads to max_length; compare only the overlapping prefix
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
    def A ( self : Optional[int] ):
        '''Greedy generation: eager and jitted `generate` agree.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        UpperCAmelCase = False
        UpperCAmelCase = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase )
            UpperCAmelCase = jit(model.generate )
            UpperCAmelCase = jit_generate(lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def A ( self : int ):
        '''Sampling (do_sample=True): eager and jitted `generate` agree.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        UpperCAmelCase = True
        UpperCAmelCase = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase )
            UpperCAmelCase = jit(model.generate )
            UpperCAmelCase = jit_generate(lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def A ( self : Optional[Any] ):
        '''Beam search (2 beams): eager and jitted `generate` agree.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        UpperCAmelCase = False
        UpperCAmelCase = max_length
        UpperCAmelCase = 2
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase )
            UpperCAmelCase = jit(model.generate )
            UpperCAmelCase = jit_generate(lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def A ( self : List[Any] ):
        '''num_return_sequences multiplies the output batch dimension.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        UpperCAmelCase = False
        UpperCAmelCase = max_length
        UpperCAmelCase = 2
        UpperCAmelCase = 2
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
    def A ( self : List[str] ):
        '''Sampling with temperature/top-k/top-p style settings: eager vs jitted.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        UpperCAmelCase = True
        UpperCAmelCase = max_length
        UpperCAmelCase = 0.8
        UpperCAmelCase = 10
        UpperCAmelCase = 0.3
        UpperCAmelCase = 1
        UpperCAmelCase = 8
        UpperCAmelCase = 9
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase )
            UpperCAmelCase = jit(model.generate )
            UpperCAmelCase = jit_generate(lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def A ( self : Tuple ):
        '''Greedy generation with extra generation-config attributes: eager vs jitted.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        UpperCAmelCase = max_length
        UpperCAmelCase = 1
        UpperCAmelCase = 8
        UpperCAmelCase = 9
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase )
            UpperCAmelCase = jit(model.generate )
            UpperCAmelCase = jit_generate(lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def A ( self : List[Any] ):
        '''Beam-search variant of the previous test: eager vs jitted.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        UpperCAmelCase = max_length
        UpperCAmelCase = 2
        UpperCAmelCase = 1
        UpperCAmelCase = 8
        UpperCAmelCase = 9
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase )
            UpperCAmelCase = jit(model.generate )
            UpperCAmelCase = jit_generate(lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def A ( self : int ):
        '''Greedy generation with a left-padded attention mask: eager vs jitted.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        # pad attention mask on the left
        UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
        UpperCAmelCase = False
        UpperCAmelCase = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase , attention_mask=lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase )
            UpperCAmelCase = jit(model.generate )
            UpperCAmelCase = jit_generate(lowercase , attention_mask=lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def A ( self : Union[str, Any] ):
        '''Sampled generation with a left-padded attention mask: eager vs jitted.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        # pad attention mask on the left
        UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
        UpperCAmelCase = True
        UpperCAmelCase = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase , attention_mask=lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase )
            UpperCAmelCase = jit(model.generate )
            UpperCAmelCase = jit_generate(lowercase , attention_mask=lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def A ( self : int ):
        '''Beam-search generation with a left-padded attention mask: eager vs jitted.'''
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
        # pad attention mask on the left
        UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
        UpperCAmelCase = 2
        UpperCAmelCase = max_length
        for model_class in self.all_generative_model_classes:
            UpperCAmelCase = model_class(lowercase )
            UpperCAmelCase = model.generate(lowercase , attention_mask=lowercase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , lowercase )
            UpperCAmelCase = jit(model.generate )
            UpperCAmelCase = jit_generate(lowercase , attention_mask=lowercase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class _a ( unittest.TestCase ):
    """Integration checks for `generate` argument validation.

    NOTE(review): `lowercase` (used as the expected exception class and as the
    input-ids/kwargs variable below) is undefined after renaming — TODO restore
    the original identifiers.
    """
    def A ( self : Optional[Any] ):
        '''Unknown keyword arguments to `generate` raise with a helpful message.'''
        UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
        UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        UpperCAmelCase = '''Hello world'''
        UpperCAmelCase = tokenizer(lowercase , return_tensors='''np''' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(lowercase , '''do_samples''' ):
            model.generate(lowercase , do_samples=lowercase )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(lowercase , '''foo''' ):
            UpperCAmelCase = {'''foo''': '''bar'''}
            model.generate(lowercase , **lowercase )
| 34 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance):
    """Relax all edges out of `v` for one direction of bidirectional Dijkstra.

    Updates `cst_fwd`, `parent` and `queue` in place; returns the (possibly
    improved) best meeting distance. Renamed from the obfuscated `snake_case_`
    (which also reused `_a` for every parameter — a SyntaxError) to match the
    call site in the driver below.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # edge reaches a node already settled from the other side: candidate meeting path
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def snake_case_(source, destination, graph_forward, graph_backward):
    """Bidirectional Dijkstra shortest-path distance from `source` to `destination`.

    :param graph_forward: adjacency lists of (neighbor, weight) pairs
    :param graph_backward: same graph with every edge reversed
    :return: shortest distance, 0 when source == destination, -1 if unreachable
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        # standard termination: the two frontiers together already exceed the best meeting path
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Demo graphs for the bidirectional search above. The original assigned *both*
# graphs to the same name `A`, so the forward graph was lost.
graph_fwd = {
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
# Reverse-edge view of graph_fwd, used for the backward frontier.
graph_bwd = {
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 34 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE(SchedulerCommonTest):
    """Scheduler tests for ``PNDMScheduler`` (PRK warm-up + PLMS stepping).

    Fixes vs. previous revision: both class attributes were named
    ``lowercase__`` and every method was named ``__lowerCAmelCase`` (later
    definitions shadowed earlier ones, so most tests never ran), several defs
    repeated the parameter name ``lowercase_`` (a SyntaxError), and the base
    class ``a_`` was undefined — ``SchedulerCommonTest`` is imported above and
    was otherwise unused. Method names follow the upstream diffusers test file.
    """

    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Default PNDM config; keyword arguments override individual entries."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Assert save_config/from_pretrained round-trips step identically."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (assumed target is the PNDM `ets`
            # history buffer, as in the upstream test — confirm)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Round-tripping is already covered by check_over_configs/check_over_forward.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Assert round-tripped schedulers agree for given forward kwargs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a full PRK+PLMS denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 358 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

# Fixes vs. previous revision: all five module constants were assigned to the
# single name ``__UpperCamelCase`` (each clobbering the previous), while the
# tokenizer class below references VOCAB_FILES_NAMES / PRETRAINED_* by name.
# Dict contents (keys, URLs, sizes) are unchanged.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Pretrained-checkpoint locations for the slow-vocab and fast-tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    """Fast SqueezeBERT tokenizer backed by the HuggingFace *tokenizers* library.

    Fixes vs. previous revision: ``__init__`` repeated the parameter name
    ``lowercase_`` ten times (a SyntaxError), method bodies referenced names
    (``token_ids_a``, ``output``, ``do_lower_case``) that were never bound, all
    five class attributes were named ``lowercase__`` (shadowing each other),
    and the base class ``a_`` was undefined — ``PreTrainedTokenizerFast`` is
    imported above and was otherwise unused. Overridden method/attribute names
    follow the ``PreTrainedTokenizerFast`` interface.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer when its serialized options disagree
        # with the arguments passed here (e.g. a checkpoint saved with
        # different casing settings).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model input as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the backend tokenizer's vocabulary files; return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 74 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    """Entry point for the ``diffusers-cli`` command-line tool.

    Fix vs. previous revision: the function was named ``snake_case_`` while the
    ``__main__`` guard below calls ``main()``, and the body referenced the
    undefined name ``__A`` instead of the subparser/args it had just created.
    """
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No subcommand given: show usage and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 23 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImgaImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for ``StableDiffusionControlNetImgaImgPipeline``.

    Fixes vs. previous revision: this class, the multi-controlnet class and the
    slow class below all shared the name ``__snake_case`` (later definitions
    shadowed earlier ones, so these tests never ran), the mixin bases ``a``
    were undefined (the mixins are imported above and otherwise unused), the
    class attributes were all named ``UpperCAmelCase__`` (shadowing), and
    ``np.uinta`` is not a NumPy API (``np.uint8``).
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a tiny pipeline (unet/controlnet/scheduler/vae/text encoder)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline call kwargs for the given device."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class MultiControlNetImgaImgPipelineFastTests(PipelineTesterMixin, PipelineLatentTesterMixin, unittest.TestCase):
    """Fast tests for img2img with two chained ControlNets (``MultiControlNetModel``).

    Fixes vs. previous revision: the class reused the name ``__snake_case``
    (shadowing the single-controlnet class above), methods were all named
    ``lowerCamelCase`` (shadowing each other), ``init_weights`` referenced the
    undefined name ``m`` and the non-existent ``torch.nn.Convad``
    (``torch.nn.Conv2d``), and ``np.uinta`` is not a NumPy API (``np.uint8``).
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Build a tiny pipeline with two ControlNets wrapped in MultiControlNetModel."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Re-initialize conv weights so the two controlnets differ from defaults.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs with one control image per controlnet."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end settings must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImgaImgPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration test against real pretrained ControlNet checkpoints.

    Fix vs. previous revision: the class reused the name ``__snake_case``
    (clobbering the two fast test classes defined above it).
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
| 51 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ``PhobertTokenizer``.

    Fixes vs. previous revision: both class attributes were named
    ``snake_case_`` (the second shadowed the first), all four methods shared
    the name ``SCREAMING_SNAKE_CASE_`` (so only the last survived), setUp
    bound everything to the single local ``__a``, and the base class ``a`` was
    undefined — ``TokenizerTesterMixin`` is imported above and otherwise unused.
    """

    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's temp directory."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    # Extract a subset of teacher layers into a student checkpoint for
    # transfer-learned distillation.
    #
    # Fix vs. previous revision: every binding was assigned to the single name
    # ``A``, so ``parser``, ``args``, ``model``, ``prefix``, ``state_dict``,
    # ``compressed_sd`` and ``std_idx`` were all undefined at their use sites.
    # Student-side (LHS) keys are reconstructed to index layers by the new
    # student position ``std_idx`` — matches the upstream distillation
    # extraction script; confirm against the training code.
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class a__(Protocol):
    """Structural type for audio filters: anything exposing ``process``.

    Fixes vs. previous revision: the base class ``__a`` was undefined
    (``Protocol`` is imported above and was otherwise unused) and the method
    was named ``a_`` although callers below invoke ``filter_type.process``.
    """

    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]; default returns silence."""
        return 0.0
def snake_case__ (fft_results: np.ndarray, samplerate: int) -> tuple[float, float]:
    """Return display bounds ``(lowest, highest)`` in dB for an FFT magnitude plot.

    Only bins 1 .. samplerate//2 - 2 (positive frequencies below Nyquist) are
    considered; the result is clamped to at most -20 for the lower bound and at
    least +20 for the upper bound so tiny responses still get a visible range.

    The original signature declared two parameters both named ``_A`` — a
    SyntaxError — and the body referenced the lost names; both are restored here.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def snake_case__ (filter_type: "FilterType", samplerate: int) -> None:
    """Plot the frequency (gain) response of *filter_type* on a log-frequency axis.

    Feeds a unit impulse through the filter, zero-pads to *samplerate* samples,
    and plots 20*log10(|FFT|). Fixes from the corrupted original: duplicate
    ``_A`` parameters (SyntaxError), the nonexistent ``np.logaa`` (→ ``np.log10``),
    and undefined ``__lowerCAmelCase`` references inside the body.
    NOTE(review): still calls the sibling helper by its original name
    ``get_bounds``, exactly as the original line did.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding out to one second of samples
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 Hz up to the Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def snake_case__ (filter_type: "FilterType", samplerate: int) -> None:
    """Plot the phase response of *filter_type* on a log-frequency axis.

    Feeds a unit impulse through the filter, zero-pads to *samplerate* samples,
    and plots the unwrapped FFT phase. Fixes from the corrupted original:
    duplicate ``_A`` parameters (SyntaxError) and undefined
    ``__lowerCAmelCase`` references inside the body.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding out to one second of samples
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 Hz up to the Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
| 272 |
import requests
# Base URL of the (v1) NewsAPI endpoint for top BBC News stories; the caller's
# API key is appended directly to this string.
_A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def lowerCamelCase__ ( bbc_news_api_key: str ) -> None:
    """Fetch the current top BBC News stories and print a numbered list of titles.

    Fixes from the corrupted original: the body referenced the undefined names
    ``_NEWS_API`` and ``bbc_news_page`` (the constant is bound to ``_A`` and the
    response was never assigned), and the ``__main__`` guard called the
    undefined ``fetch_bbc_news``.
    """
    bbc_news_page = requests.get(_A + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"] , 1 ):
        print(f"{i}.) {article['title']}" )


if __name__ == "__main__":
    lowerCamelCase__(bbc_news_api_key="<Your BBC News API key goes here>")
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __snake_case :
    """Builds a tiny Blenderbot config plus random inputs for the TF model tests.

    Reconstructed from the obfuscated original, which had every ``__init__``
    parameter named ``snake_case__`` (a SyntaxError), lost the ``self.*``
    assignment targets, and gave both helper methods the same name so the
    second shadowed the first. Method names follow the callers at the bottom
    of this file (``prepare_config_and_inputs_for_common`` /
    ``check_decoder_model_past_large_inputs``).
    """

    config_cls = BlenderbotConfig          # read via self.config_cls below
    config_updates = {}                    # extra kwargs merged into the config
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a (config, inputs_dict) pair with random token ids."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Force every row to end in EOS so generation tests have a valid target.
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify cached (past_key_values) decoding matches a full forward pass."""
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for the overlapping slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def lowerCAmelCase_ (
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full kwargs dict for a Blenderbot forward pass, deriving any mask not given.

    Fixes from the corrupted original: all eight parameters were named
    ``__lowerCAmelCase`` (a SyntaxError), the derived masks were assigned to a
    throwaway name instead of the parameters returned below, and ``tf.inta``
    is the nonexistent spelling of ``tf.int8``.
    """
    if attention_mask is None:
        # Attend to every non-pad token of the encoder input.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder position is always visible; the rest follow padding.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __snake_case ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common TF model-tester harness wired up for Blenderbot.

    Fixes from the corrupted original: the class listed the same (undefined)
    base twice — restored to the mixins imported at the top of this file; all
    class attributes shared one name so only the last survived; ``setUp`` and
    the ``test_*`` names unittest discovers were lost; and ``config_class``
    was bound to an undefined name instead of ``BlenderbotConfig``.
    """

    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class __snake_case ( unittest.TestCase ):
    """Slow integration test: generation on the 400M distilled Blenderbot checkpoint.

    Fixes from the corrupted original: the class attributes are read as
    ``self.src_text`` / ``self.model_name`` but were bound to a shared
    throwaway name; the two cached properties and the test method all shared
    one name (so only the last survived and nothing was discoverable by
    unittest); and ``skip_special_tokens`` was bound to an undefined name.
    """

    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        # Cached so repeated accesses do not re-download the tokenizer.
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 365 | from ..utils import DummyObject, requires_backends
# Dummy placeholders (HF dummy-object pattern): each class raises an informative
# error via requires_backends when PyTorch is unavailable. Fixes from the
# corrupted originals: every method declared ``*snake_case__, **snake_case__``
# (duplicate argument name — a SyntaxError), ``__init__`` carried non-None
# return annotations, and both classmethods shared one name so the second
# shadowed the first; restored to the standard ``from_config``/``from_pretrained``.
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class __snake_case ( metaclass=lowerCamelCase__ ):
    """Dummy placeholder; any use raises an error explaining that PyTorch is required."""
    __lowerCamelCase = ["torch"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["torch"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Dummy function stubs (HF dummy-object pattern). Fixes from the corrupted
# originals: each declared ``*__lowerCAmelCase, **__lowerCAmelCase`` — a
# duplicate argument name, i.e. a SyntaxError — and carried bogus return
# annotations. Per the HF convention the callable itself is passed to
# requires_backends so the raised message names it.
def lowerCAmelCase_(*args, **kwargs) -> None:
    """Stub that raises an informative error because PyTorch is required."""
    requires_backends(lowerCAmelCase_, ["torch"])


def lowerCAmelCase_(*args, **kwargs) -> None:
    """Stub that raises an informative error because PyTorch is required."""
    requires_backends(lowerCAmelCase_, ["torch"])


def lowerCAmelCase_(*args, **kwargs) -> None:
    """Stub that raises an informative error because PyTorch is required."""
    requires_backends(lowerCAmelCase_, ["torch"])


def lowerCAmelCase_(*args, **kwargs) -> None:
    """Stub that raises an informative error because PyTorch is required."""
    requires_backends(lowerCAmelCase_, ["torch"])


def lowerCAmelCase_(*args, **kwargs) -> None:
    """Stub that raises an informative error because PyTorch is required."""
    requires_backends(lowerCAmelCase_, ["torch"])


def lowerCAmelCase_(*args, **kwargs) -> None:
    """Stub that raises an informative error because PyTorch is required."""
    requires_backends(lowerCAmelCase_, ["torch"])


def lowerCAmelCase_(*args, **kwargs) -> None:
    """Stub that raises an informative error because PyTorch is required."""
    requires_backends(lowerCAmelCase_, ["torch"])
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : int = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : int = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : str = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Dict = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Tuple = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : List[str] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = ["""torch"""]
def __init__( self , *snake_case__ , **snake_case__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : int = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : int = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : Tuple = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : str = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[str]:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> List[Any]:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : int = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : str = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : Any = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : Dict = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> str:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Any:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : str = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[int]:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> int:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : Tuple = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Union[str, Any]:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
class __snake_case ( metaclass=lowerCamelCase__ ):
    """Import-time dummy: raises via `requires_backends` until torch is installed."""

    __lowerCamelCase : List[str] = ["""torch"""]

    def __init__( self , *snake_case__ , **snake_case__ ) -> None:
        """Reject instantiation unless the torch backend is installed."""
        requires_backends(self , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Tuple:
        """Classmethod stub; raises without torch."""
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def UpperCAmelCase__ ( cls , *snake_case__ , **snake_case__ ) -> Optional[Any]:
        """Duplicate name — shadows the classmethod above."""
        requires_backends(cls , ['''torch'''] )
| 78 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs are truthy (logical AND), else 0.

    The original dump was broken: both functions shared one obfuscated name,
    the gate's parameters were duplicated (a SyntaxError) and its body
    referenced an undefined `input_a`; the `__main__` guard then called the
    undefined names `test_and_gate`/`and_gate`. Reconstructed coherently.

    >>> and_gate(0, 1)
    0
    >>> and_gate(1, 1)
    1
    """
    # No zero in the pair means both inputs are non-zero -> AND is true.
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the two-input truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 19 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : list[int] ) -> float:
"""simple docstring"""
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
snake_case : List[str] = sum(lowercase ) / len(lowercase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 203 | 0 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class a_ :
    """Command-line arguments selecting the encoder/decoder checkpoints to
    combine into a vision encoder-decoder model.

    Fix: the obfuscated dump wrote ``default=a__`` for the two optional config
    fields, but ``a__`` is never defined in this file, so importing the module
    raised NameError; the upstream default is ``None``.

    NOTE(review): the dump also collapsed all five field names into the single
    mangled name ``__SCREAMING_SNAKE_CASE`` — only the last annotation survives,
    so the dataclass effectively exposes one optional field. The real field
    names (output_dir, encoder_model_name_or_path, ...) are not recoverable
    from this chunk; confirm against the upstream example script.
    """

    # Output directory where the assembled model will be written.
    __SCREAMING_SNAKE_CASE : str = field(
        metadata={'help': 'The output directory where the model will be written.'} , )
    # Encoder checkpoint used for weight initialization.
    __SCREAMING_SNAKE_CASE : str = field(
        metadata={
            'help': (
                'The encoder model checkpoint for weights initialization.'
                'Don\'t set if you want to train an encoder model from scratch.'
            )
        } , )
    # Decoder checkpoint used for weight initialization.
    __SCREAMING_SNAKE_CASE : str = field(
        metadata={
            'help': (
                'The decoder model checkpoint for weights initialization.'
                'Don\'t set if you want to train a decoder model from scratch.'
            )
        } , )
    # Optional explicit config names; fall back to the model checkpoints.
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
    __SCREAMING_SNAKE_CASE : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def UpperCAmelCase_( ):
    """Build a FlaxVisionEncoderDecoderModel from pretrained encoder/decoder
    checkpoints, wire up the generation token ids, and save model, image
    processor and tokenizer to ``model_args.output_dir``.

    Fixes over the dump: ``(X) : T = parser.parse_args_into_dataclasses()``
    bound the whole returned tuple instead of unpacking its single element;
    several assignments had lost their targets (the ``is_decoder`` /
    ``add_cross_attention`` flags and the ``model.config`` token ids, all
    obfuscated to bare names); and the ``__main__`` guard called an undefined
    ``main()``.
    """
    parser = HfArgumentParser((ModelArguments,))
    # parse_args_into_dataclasses returns a tuple; unpack its single element.
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    UpperCAmelCase_()
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the dump collapsed several distinct module constants into the
# single name `a__`; each assignment below rebinds it (presumably these were
# SPIECE_UNDERLINE, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and the module logger — confirm).
a__ : Tuple = '''▁'''  # SentencePiece word-boundary marker
a__ : List[Any] = {'''vocab_file''': '''spiece.model'''}
# Per-checkpoint download URL for the SentencePiece model.
a__ : Optional[Any] = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
# Maximum model input size (positional embeddings) per checkpoint.
a__ : str = {
    '''google/pegasus-xsum''': 512,
}
a__ : str = logging.get_logger(__name__)  # module logger
class a_ ( a__ ):
    """SentencePiece-based PEGASUS tokenizer (obfuscated dataset dump).

    NOTE(review): the dump collapsed distinct identifiers — every local became
    `SCREAMING_SNAKE_CASE`, every parameter `_lowerCamelCase`, and most methods
    share the name `__lowerCAmelCase` (each later def shadows the earlier one).
    Consequently several defs have duplicate parameter names (a SyntaxError)
    and bodies reference names that no longer exist (`offset`, `vocab_file`,
    `token`, `vocab`, ...). Code is kept byte-identical; recover the real
    identifiers from the upstream PegasusTokenizer before editing further.
    The base class `a__` was last rebound above to a string — presumably it was
    `PreTrainedTokenizer` originally.
    """

    __SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
    # Duplicate assignment: rebinds the same (name-mangled) class attribute.
    __SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
    __SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
    __SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']

    # Parameters presumably were: vocab_file, pad_token, eos_token, unk_token,
    # mask_token, mask_token_sent, additional_special_tokens, offset,
    # sp_model_kwargs — all collapsed to `_lowerCamelCase` (duplicate args).
    def __init__( self , _lowerCamelCase , _lowerCamelCase="<pad>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<mask_2>" , _lowerCamelCase="<mask_1>" , _lowerCamelCase=None , _lowerCamelCase=103 , _lowerCamelCase = None , **_lowerCamelCase , ) ->None:
        """Load the SentencePiece model and register PEGASUS special tokens."""
        SCREAMING_SNAKE_CASE : Dict = offset
        if additional_special_tokens is not None:
            if not isinstance(_lowerCamelCase , _lowerCamelCase ):
                raise TypeError(
                    F"""additional_special_tokens should be of type {type(_lowerCamelCase )}, but is"""
                    F""" {type(_lowerCamelCase )}""" )
            # Make sure <mask_1> is included among the additional special tokens.
            SCREAMING_SNAKE_CASE : List[Any] = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"""<unk_{i}>""" for i in range(len(_lowerCamelCase ) , self.offset - 1 )
            ]
            if len(set(_lowerCamelCase ) ) != len(_lowerCamelCase ):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            SCREAMING_SNAKE_CASE : Dict = additional_special_tokens_extended
        else:
            SCREAMING_SNAKE_CASE : str = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
        SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , mask_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token_sent=_lowerCamelCase , offset=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
        SCREAMING_SNAKE_CASE : List[str] = mask_token_sent
        SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
        SCREAMING_SNAKE_CASE : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_lowerCamelCase )
        # add special tokens to encoder dict
        SCREAMING_SNAKE_CASE : Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        # Reverse mapping token -> id for the fixed special entries.
        SCREAMING_SNAKE_CASE : Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def __lowerCAmelCase ( self ) ->int:
        """Vocabulary size: SentencePiece pieces plus the reserved offset ids."""
        return len(self.sp_model ) + self.offset

    def __lowerCAmelCase ( self ) ->Dict[str, int]:
        """Build the full token -> id vocabulary, including added tokens."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) ->Optional[Any]:
        # Drop the (unpicklable) SentencePiece processor before pickling.
        SCREAMING_SNAKE_CASE : Optional[int] = self.__dict__.copy()
        SCREAMING_SNAKE_CASE : str = None
        return state

    def __setstate__( self , _lowerCamelCase ) ->Union[str, Any]:
        # Restore state and re-load the SentencePiece model from disk.
        SCREAMING_SNAKE_CASE : Optional[Any] = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            SCREAMING_SNAKE_CASE : List[str] = {}
        SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
        """Tokenize a string into SentencePiece sub-word pieces."""
        return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )

    def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
        """Convert a token to an id, shifting SentencePiece ids by the offset."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        SCREAMING_SNAKE_CASE : List[str] = self.sp_model.piece_to_id(_lowerCamelCase )
        return sp_id + self.offset

    def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
        """Convert an id back to a token, undoing the offset shift."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            SCREAMING_SNAKE_CASE : Dict = self.sp_model.IdToPiece(index - self.offset )
            return token

    def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
        """Join tokens into a string, decoding sub-word runs with SentencePiece."""
        SCREAMING_SNAKE_CASE : Dict = []
        SCREAMING_SNAKE_CASE : int = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(_lowerCamelCase ) + token
                SCREAMING_SNAKE_CASE : Optional[Any] = []
            else:
                current_sub_tokens.append(_lowerCamelCase )
        out_string += self.sp_model.decode(_lowerCamelCase )
        return out_string.strip()

    def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->int:
        # Number of special tokens appended to a sequence (just EOS).
        return 1

    def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
        """Return a 0/1 mask flagging which ids in `seq` are special tokens."""
        SCREAMING_SNAKE_CASE : Dict = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) ->List[int]:
        """Special-token mask for one or two sequences (trailing 1 is EOS)."""
        if already_has_special_tokens:
            return self._special_token_mask(_lowerCamelCase )
        elif token_ids_a is None:
            return self._special_token_mask(_lowerCamelCase ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a ) + [1]

    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->List[int]:
        """Append EOS to build a model input from one (or two) sequences."""
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
        """Write the serialized SentencePiece model into `save_directory`."""
        if not os.path.isdir(_lowerCamelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        SCREAMING_SNAKE_CASE : int = os.path.join(
            _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _lowerCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(_lowerCamelCase , '''wb''' ) as fi:
                SCREAMING_SNAKE_CASE : Tuple = self.sp_model.serialized_model_proto()
                fi.write(_lowerCamelCase )
        return (out_vocab_file,)
| 19 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
# NOTE(review): the dump collapsed distinct module constants into the single
# name `_A`; each assignment rebinds it (presumably logger, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — confirm).
_A : Any = logging.get_logger(__name__)  # module logger
_A : List[str] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

# See all BART models at https://huggingface.co/models?filter=bart
# Download URLs per checkpoint for vocab / merges / fast-tokenizer files.
_A : Any = {
    """vocab_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
    },
}
# Maximum model input size (positional embeddings) per checkpoint.
_A : Any = {
    """facebook/bart-base""": 10_24,
    """facebook/bart-large""": 10_24,
    """facebook/bart-large-mnli""": 10_24,
    """facebook/bart-large-cnn""": 10_24,
    """facebook/bart-large-xsum""": 10_24,
    """yjernite/bart_eli5""": 10_24,
}
class a__ ( a_ ):
    """Fast (Rust-backed) BART tokenizer (obfuscated dataset dump).

    NOTE(review): the dump collapsed identifiers — every parameter is `_a`
    (so `__init__` has duplicate parameter names, a SyntaxError), every local
    is `lowercase`, and most methods share the name `__magic_name__` (each
    later def shadows the earlier). Bodies reference names that no longer
    exist (`pre_tok_state`, `state`, `value`, `kwargs`, `token_ids_a`, ...).
    The base class `a_` is undefined in this chunk — presumably
    `PreTrainedTokenizerFast`. Kept byte-identical; recover the real
    identifiers from the upstream BartTokenizerFast before editing further.
    """

    __lowerCAmelCase = VOCAB_FILES_NAMES
    __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase = ["""input_ids""", """attention_mask"""]
    __lowerCAmelCase = BartTokenizer  # slow-tokenizer counterpart

    def __init__( self , _a=None , _a=None , _a=None , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , _a=True , **_a , ):
        """Configure the backend pre-tokenizer/post-processor for
        `add_prefix_space` and `trim_offsets`."""
        super().__init__(
            _a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
        lowercase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , _a ) != add_prefix_space:
            # Rebuild the pre-tokenizer with the requested add_prefix_space.
            lowercase : Union[str, Any] = getattr(_a , pre_tok_state.pop("type" ) )
            lowercase : Optional[int] = add_prefix_space
            lowercase : int = pre_tok_class(**_a )
        lowercase : Optional[Any] = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        lowercase : Optional[int] = "post_processor"
        lowercase : List[str] = getattr(self.backend_tokenizer , _a , _a )
        if tokenizer_component_instance:
            lowercase : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase : Optional[int] = tuple(state["sep"] )
            if "cls" in state:
                lowercase : int = tuple(state["cls"] )
            lowercase : int = False
            if state.get("add_prefix_space" , _a ) != add_prefix_space:
                lowercase : int = add_prefix_space
                lowercase : List[str] = True
            if state.get("trim_offsets" , _a ) != trim_offsets:
                lowercase : Dict = trim_offsets
                lowercase : List[str] = True
            if changes_to_apply:
                # Rebuild and re-attach the post-processor component.
                lowercase : List[Any] = getattr(_a , state.pop("type" ) )
                lowercase : Union[str, Any] = component_class(**_a )
                setattr(self.backend_tokenizer , _a , _a )

    @property
    def __magic_name__ ( self ):
        """Mask-token getter: log an error and return None when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    # NOTE(review): `@mask_token.setter` requires a property named `mask_token`,
    # but the getter above was renamed `__magic_name__` by the dump.
    @mask_token.setter
    def __magic_name__ ( self , _a ):
        """Mask-token setter: wrap a plain string in AddedToken (lstrip space)."""
        lowercase : List[str] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
        lowercase : List[Any] = value

    def __magic_name__ ( self , *_a , **_a ):
        """Batch encode; pretokenized input requires add_prefix_space=True."""
        lowercase : Dict = kwargs.get("is_split_into_words" , _a )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*_a , **_a )

    def __magic_name__ ( self , *_a , **_a ):
        """Single encode; same add_prefix_space constraint as above."""
        lowercase : Tuple = kwargs.get("is_split_into_words" , _a )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*_a , **_a )

    def __magic_name__ ( self , _a , _a = None ):
        """Save the backend tokenizer's model files; return the file paths."""
        lowercase : str = self._tokenizer.model.save(_a , name=_a )
        return tuple(_a )

    def __magic_name__ ( self , _a , _a=None ):
        """Build model inputs: <s> A </s> (and </s> B </s> for pairs)."""
        lowercase : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def __magic_name__ ( self , _a , _a = None ):
        """Token-type ids: BART uses all zeros, for one or two sequences."""
        lowercase : Union[str, Any] = [self.sep_token_id]
        lowercase : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 202 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (SentencePiece) tokenizer is optional; fall back to None without it.
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    _A : Union[str, Any] = None

# NOTE(review): the dump collapsed distinct module constants into the single
# name `_A`; each assignment rebinds it (presumably FNetTokenizer fallback,
# logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, sizes and
# SPIECE_UNDERLINE — confirm against the upstream module).
_A : str = logging.get_logger(__name__)
_A : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

# Download URLs per checkpoint for the SentencePiece model / fast tokenizer.
_A : Optional[Any] = {
    """vocab_file""": {
        """google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/spiece.model""",
        """google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json""",
        """google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json""",
    },
}
# Maximum model input size (positional embeddings) per checkpoint.
_A : Optional[Any] = {
    """google/fnet-base""": 5_12,
    """google/fnet-large""": 5_12,
}
_A : List[str] = """▁"""  # SentencePiece word-boundary marker
class a__ ( a_ ):
    """Fast (Rust-backed) FNet tokenizer (obfuscated dataset dump).

    NOTE(review): the dump collapsed identifiers — every parameter is `_a`
    (so `__init__` has duplicate parameter names, a SyntaxError), every local
    is `lowercase`, and all three helper methods share the name
    `__magic_name__` (each later def shadows the earlier). Bodies reference
    names that no longer exist (`mask_token`, `do_lower_case`, `cls`, `sep`,
    `token_ids_a`, `out_vocab_file`, ...). The base class `a_` is undefined in
    this chunk — presumably `PreTrainedTokenizerFast`. Kept byte-identical.
    """

    __lowerCAmelCase = VOCAB_FILES_NAMES
    __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase = ["""input_ids""", """token_type_ids"""]
    __lowerCAmelCase = FNetTokenizer  # slow-tokenizer counterpart

    def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ):
        """Store normalization flags and remember whether a slow vocab exists."""
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        lowercase : int = (
            AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
            if isinstance(_a , _a )
            else mask_token
        )
        super().__init__(
            _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
        lowercase : Dict = do_lower_case
        lowercase : Union[str, Any] = remove_space
        lowercase : Any = keep_accents
        lowercase : List[Any] = vocab_file
        # Can only convert back to a slow tokenizer when a vocab file is known.
        lowercase : Union[str, Any] = False if not self.vocab_file else True

    def __magic_name__ ( self , _a , _a = None ):
        """Build model inputs: [CLS] A [SEP] (and B [SEP] for pairs)."""
        lowercase : Optional[Any] = [self.sep_token_id]
        lowercase : str = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def __magic_name__ ( self , _a , _a = None ):
        """Token-type ids: 0s for the first segment, 1s for the second."""
        lowercase : Any = [self.sep_token_id]
        lowercase : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __magic_name__ ( self , _a , _a = None ):
        """Copy the SentencePiece vocab file into `save_directory`."""
        if not os.path.isdir(_a ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowercase : Optional[Any] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
            copyfile(self.vocab_file , _a )
        return (out_vocab_file,)
| 202 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __snake_case ( _lowerCAmelCase : int ) -> bool:
A_ : Optional[Any] = int(number**0.5 )
return number == sq * sq
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> tuple[int, int]:
A_ : Tuple = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
A_ : Union[str, Any] = x_den * y_den * z_den
A_ : str = gcd(_UpperCamelCase , _UpperCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def __snake_case ( _lowerCAmelCase : int = 35 ) -> int:
A_ : Optional[int] = set()
A_ : List[str] = 42
A_ : Optional[int] = Fraction(0 )
A_ : List[Any] = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
A_ : Dict = x_num * y_den + x_den * y_num
A_ : List[Any] = x_den * y_den
A_ : Optional[int] = gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A_ : Optional[Any] = add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
# n=2
A_ : Optional[Any] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
A_ : Union[str, Any] = x_den * x_den * y_den * y_den
if is_sq(_UpperCamelCase ) and is_sq(_UpperCamelCase ):
A_ : List[Any] = int(sqrt(_UpperCamelCase ) )
A_ : Any = int(sqrt(_UpperCamelCase ) )
A_ : Optional[int] = gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A_ : List[Any] = add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
# n=-1
A_ : Any = x_num * y_num
A_ : List[Any] = x_den * y_num + x_num * y_den
A_ : Optional[int] = gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A_ : Tuple = add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
# n=2
A_ : Union[str, Any] = x_num * x_num * y_num * y_num
A_ : Optional[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_UpperCamelCase ) and is_sq(_UpperCamelCase ):
A_ : Tuple = int(sqrt(_UpperCamelCase ) )
A_ : List[str] = int(sqrt(_UpperCamelCase ) )
A_ : Optional[Any] = gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A_ : List[str] = add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
for num, den in unique_s:
total += Fraction(_UpperCamelCase , _UpperCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
| 359 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_lowerCAmelCase : str = logging.get_logger(__name__)
@dataclass
class __magic_name__:
    """Configuration for loading a model quantized with `bitsandbytes`.

    Mirrors the `transformers.BitsAndBytesConfig` contract: collects the
    8-bit (LLM.int8) and 4-bit (fp4/nf4) loading options, validates them in
    `post_init`, and provides dict/JSON (de)serialization helpers.
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        """Store the quantization options and validate them via `post_init`.

        Args:
            load_in_8bit (`bool`): enable 8-bit (LLM.int8) loading.
            load_in_4bit (`bool`): enable 4-bit loading.
            llm_int8_threshold (`float`): outlier threshold for LLM.int8.
            llm_int8_skip_modules (`list[str]`, optional): module names to keep unquantized.
            llm_int8_enable_fp32_cpu_offload (`bool`): allow fp32 CPU offload of some modules.
            llm_int8_has_fp16_weight (`bool`): keep fp16 main weights alongside int8.
            bnb_4bit_compute_dtype (`str` or `torch.dtype`, optional): compute dtype
                for 4-bit layers; defaults to `torch.float32`.
            bnb_4bit_quant_type (`str`): 4-bit quantization type, "fp4" or "nf4".
            bnb_4bit_use_double_quant (`bool`): enable nested (double) quantization.
        """
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            # Resolve e.g. "bfloat16" against the torch namespace.
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Type-check every stored option; raise `ValueError` on misuse."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        # 4-bit support only exists in recent bitsandbytes releases.
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Return True when either quantization mode was requested."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Return the active method name ("llm_int8", "fp4", "nf4") or None."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        """Build a config from `config_dict`; optionally also return unconsumed kwargs."""
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        """Serialize this config as pretty-printed, key-sorted JSON to `json_file_path`."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self):
        """Return a plain-dict copy; the compute dtype is rendered as e.g. "float32"."""
        output = copy.deepcopy(self.__dict__)
        # str(torch.float32) == "torch.float32" -> keep only the short name.
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True):
        """Return the JSON string; by default only the keys that differ from defaults."""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        """Return only the entries that differ from a default-constructed config."""
        config_dict = self.to_dict()

        # Compare against a freshly built default instance of this class.
        default_config_dict = self.__class__().to_dict()

        serializable_config_dict = {}
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
| 70 | 0 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Generate and write the hub model card (README.md) for one allenai wmt16 model.

    Args:
        model_card_dir: destination directory (a `pathlib.Path`); created if missing.
        src_lang: source language code, e.g. "en".
        tgt_lang: target language code, e.g. "de".
        model_name: one of the three allenai wmt16 checkpoints; used to look up
            its BLEU scores and fill in the card template.
    """
    # Example sentences embedded in the usage snippet of the card.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

# Emit one card per ported allenai wmt16 checkpoint.
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 140 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# Substring renaming rules (tf_name -> hf_name) applied to every TF variable
# name; INIT_COMMON runs first and END_COMMON last for both weight groups.
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

# Rules for variables under "pegasus/decoder".
DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# Rules for all remaining (encoder / embedding) variables.
REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# TF variable suffixes with no counterpart in the HF model — skipped entirely.
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply each (tf_substring, hf_substring) replacement in `patterns` to key `k`."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict):
    """Map a dict of TF checkpoint arrays onto a BigBirdPegasus PyTorch model.

    Args:
        tf_weights: {tf_variable_name: numpy array}, as produced by
            `get_tf_weights_as_numpy`.
        config_update: overrides forwarded to `BigBirdPegasusConfig`.

    Returns:
        A `BigBirdPegasusForConditionalGeneration` with the converted weights loaded.

    Raises:
        ValueError: if a renamed TF key has no counterpart in the torch state dict.
    """
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        # Projection kernels are transposed to match the torch weight layout.
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        # The shared position-embedding table is handled specially below.
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # The single TF position-embedding table feeds both encoder and decoder.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path):
    """Load every variable of the TF checkpoint at `path` into a {name: array} dict.

    Variables whose name contains any entry of `ignore_name` (optimizer
    bookkeeping such as "global_step") are skipped.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    """End-to-end conversion: read the TF checkpoint, map its weights, save the HF model."""
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    # CLI entry point: convert a TF BigBirdPegasus checkpoint to a PyTorch model.
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 140 | 1 |
"""Lazy import structure for the LUKE model subpackage.

Submodules are imported only on first attribute access (standard transformers
lazy-module pattern); the torch-dependent modeling module is registered only
when torch is available.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> list of public names it exposes.
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: simply don't register the modeling module.
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a WMT translation dataset and dump each split as plain text.

    Each split is written to `<split>.source` / `<split>.target` files, one
    sentence per line ("validation" is shortened to "val" to match the summary
    dataset layout).

    Args:
        src_lang: source language code.
        tgt_lang: target language code.
        dataset: HF `datasets` dataset name, e.g. "wmt16".
        save_dir: output directory; defaults to "<dataset>-<src_lang>-<tgt_lang>".

    Raises:
        ImportError: if the `datasets` package is not installed.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
    # CLI entry point: python-fire exposes the function's parameters as flags,
    # e.g. `python download_wmt.py --src_lang ro --tgt_lang en`.
    fire.Fire(download_wmt_dataset)
| 98 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def is_chinese(word):
    """Return 1 if every character of `word` is a CJK char, else 0.

    E.g. '神' and '身高' -> 1, while '180' or mixed strings -> 0.
    """
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """Collect the multi-character, all-CJK tokens from `tokens` (deduplicated).

    Returns them as a list (order unspecified, since a set is used internally).
    """
    word_set = set()
    for token in tokens:
        # Only words of length > 1 made purely of CJK chars count.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefix "##" to the non-initial pieces of every whole word from `chinese_word_set`.

    Scans `bert_tokens` left to right; at each CJK position it greedily tries
    the longest window matching a known whole word and marks that word's
    trailing characters as subword pieces. Mutates and returns the token list.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            window = min(end - start, max_word_len)
            # Try the longest candidate first; stop at the first match.
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # Mark every char after the first as a continuation piece.
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """Compute, for each input line, the BERT token indices that are CJK
    whole-word continuation pieces ("##x") — the reference lists needed for
    Chinese whole-word masking.

    Args:
        lines: input sentences.
        ltp_tokenizer: an `LTP` instance used for word segmentation (cws).
        bert_tokenizer: a `BertTokenizer` used to produce the input ids.

    Returns:
        One list of continuation-piece indices per input line.
    """
    ltp_res = []
    # Batch the LTP word segmentation 100 lines at a time.
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read input lines, build whole-word-masking references, write them as JSON lines.

    Expects `args` to provide `file_name`, `ltp`, `bert`, and `save_path`.
    """
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # CLI entry point: prepare whole-word-masking references for Chinese BERT.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
| 289 | """simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 289 | 1 |
def is_palindrome(head):
    """Check a singly linked list for palindromicity in O(n) time / O(1) space.

    Finds the middle with slow/fast pointers, reverses the second half in
    place, then compares the two halves node by node. NOTE: the input list is
    mutated (its second half is left reversed).
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


UpperCAmelCase__ = is_palindrome  # backward-compatible alias for the old mangled name
def is_palindrome_stack(head):
    """Palindrome check using O(n) extra space: stack the second half, pop and compare."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


UpperCAmelCase__ = is_palindrome_stack  # backward-compatible alias for the old mangled name
def is_palindrome_dict(head):
    """Palindrome check via a value -> positions index.

    For a palindrome of length n, the occurrence positions of each value must
    pair up symmetrically around the center (paired positions sum to n - 1),
    and at most one value may occur an odd number of times (the middle).
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True


UpperCAmelCase__ = is_palindrome_dict  # backward-compatible alias for the old mangled name
| 213 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = UnCLIPImageVariationPipeline
__magic_name__ = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
__magic_name__ = IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
__magic_name__ = False
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return self.time_input_dim
@property
def a_ ( self ):
return self.time_input_dim * 4
@property
def a_ ( self ):
return 1_0_0
@property
def a_ ( self ):
snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
snake_case = UnCLIPTextProjModel(**__snake_case )
return model
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def a_ ( self ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def a_ ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def a_ ( self ):
snake_case = self.dummy_decoder
snake_case = self.dummy_text_proj
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_super_res_first
snake_case = self.dummy_super_res_last
snake_case = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
snake_case = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def a_ ( self , __snake_case , __snake_case=0 , __snake_case=True ):
snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith('''mps''' ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
if pil_image:
snake_case = input_image * 0.5 + 0.5
snake_case = input_image.clamp(0 , 1 )
snake_case = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
snake_case = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = torch.device('''cpu''' )
class A__ :
"""simple docstring"""
__magic_name__ = 1
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device=__snake_case ).manual_seed(0 )
snake_case = pipe.decoder.dtype
snake_case = 1
snake_case = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
snake_case = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case ).images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
# Don't pass image, instead pass embedding
snake_case = pipeline_inputs.pop('''image''' )
snake_case = pipe.image_encoder(__snake_case ).image_embeds
snake_case = pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case , image_embeddings=__snake_case , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__snake_case , expected_max_diff=__snake_case )
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
snake_case = True
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__snake_case , relax_max_difference=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
def a_ ( self ):
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__snake_case )
@skip_mps
def a_ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a_ ( self ):
return super().test_save_load_local()
@skip_mps
def a_ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__(unittest.TestCase):
    """Slow GPU integration test against the published Karlo image-variation weights."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(input_image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 213 | 1 |
# Public API wiring for the `unclip` pipeline package.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP requires torch plus transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing deps: expose placeholder objects that raise a helpful error when used.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Real implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 326 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)

# (old_prefix, new_prefix) pairs applied to every checkpoint key during conversion.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# Original VisualBERT checkpoint filenames this converter supports.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def lowerCAmelCase__(lowercase):
    """Load a serialized checkpoint from *lowercase* (a path) and return its state dict."""
    # map_location="cpu" so conversion does not require a GPU.
    sd = torch.load(lowercase, map_location="cpu")
    return sd


# Descriptive alias used by the conversion entry point below.
load_state_dict = lowerCAmelCase__
def lowerCAmelCase__(d, config, rename_keys_prefix=None):
    """Rename original VisualBERT checkpoint keys to transformers naming.

    Args:
        d: original state dict.
        config: model config; only ``max_position_embeddings`` is read here.
        rename_keys_prefix: optional list of ``(old, new)`` prefix pairs; defaults
            to the standard VisualBERT rename table.

    Returns:
        An ``OrderedDict`` with renamed keys, detector weights dropped, and a
        generated ``position_ids`` buffer.
    """
    if rename_keys_prefix is None:
        # Mirrors the module-level rename table.
        rename_keys_prefix = [
            ("bert.bert", "visual_bert"),
            ("bert.cls", "cls"),
            ("bert.classifier", "cls"),
            ("token_type_embeddings_visual", "visual_token_type_embeddings"),
            ("position_embeddings_visual", "visual_position_embeddings"),
            ("projection", "visual_projection"),
        ]
    new_d = OrderedDict()
    # position_ids is generated rather than copied from the old checkpoint.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    for key in d:
        if "detector" in key:
            # Detection-backbone weights are not part of the transformers model.
            continue
        new_key = key
        for old_prefix, new_prefix in rename_keys_prefix:
            new_key = new_key.replace(old_prefix, new_prefix)
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


# Descriptive alias used by the conversion entry point below.
get_new_dict = lowerCAmelCase__
@torch.no_grad()
def lowerCAmelCase__(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT checkpoint and save it as a transformers model.

    Args:
        checkpoint_path: path to one of the supported ``.th`` checkpoints.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config: checkpoint filename encodes both task and visual embedding size.
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


# Descriptive alias used by the CLI entry point below.
convert_visual_bert_checkpoint = lowerCAmelCase__
if __name__ == "__main__":
    # CLI entry point: convert a local VisualBERT checkpoint and save it.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 326 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

# Names of the vocab files a checkpoint ships with.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub URLs of pretrained vocab files, per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

# Maximum input lengths, per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

# Default tokenizer init kwargs, per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCAmelCase(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by `tokenizers`, using a custom Jieba pre-tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            # Rebuild the normalizer so it matches the requested casing/accent options.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer is not picklable; swap in a plain BERT one.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore state, then re-attach the custom Jieba pre-tokenizer from the vocab.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        """Return token-type ids: 0s for the first sequence (+specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backing model's vocab files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Temporarily restore a serializable pre-tokenizer before saving to disk.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 124 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
# Canonical filenames for serialized model artifacts.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def UpperCAmelCase(a_):
    """Raise ImportError if the installed transformers version is older than *a_*.

    Args:
        a_: minimum required version string (may contain "dev" for source installs).
    """
    # Compare the installed version against the requested minimum (not against itself).
    if version.parse(__version__) < version.parse(a_):
        if "dev" in a_:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {a_},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that records every Trainer event it receives, for later assertions."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class lowerCamelCase_(unittest.TestCase):
    """End-to-end tests of Trainer callback registration and the event sequence."""

    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        """Assert both callback lists contain the same callbacks, classes vs instances allowed."""
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        """Reconstruct the event sequence the trainer should have fired for its config."""
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback, MyTestTrainerCallback])
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 31 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the Electra model family: maps submodule name to the
# public symbols it provides.  `_LazyModule` below consumes this so heavy
# framework code is imported only on first attribute access.
# NOTE(review): the original bound every piece of this structure to a single
# repeatedly-rebound module variable, so `_import_structure` (read by
# `_LazyModule` at the bottom) was never defined — a NameError at import time.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

# Each optional backend contributes its symbols only when it is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # in the `else` branch serves them on demand.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _A(checkpoint_url):
    """Build a SwinaSRConfig whose fields match the checkpoint named in *checkpoint_url*.

    NOTE(review): the original collapsed every attribute assignment into one
    throwaway local, so it always returned an unmodified default config.  The
    target attributes below are restored from the per-checkpoint values the
    mangled code still contained.
    """
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        # grayscale JPEG-artifact-reduction checkpoint
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 2_5_5.0
        config.upsampler = ""

    return config
def _A(name, config):
    """Translate one original Swin2SR state-dict key into the transformers layout.

    Args:
        name: key from the original checkpoint's state dict.
        config: model config; only ``config.upsampler`` is consulted, to decide
            how the upsampling-head keys are mapped.

    Returns:
        The renamed key.

    NOTE(review): the original declared both parameters under one name (a
    SyntaxError) and discarded every replacement into a throwaway local; the
    body already read `name`/`config`, which fixes the intended wiring.
    """
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads: mapping depends on which upsampler the config uses
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        # everything else lives under the base model prefix
        name = 'swin2sr.' + name

    return name
def _A (lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
    '''Rewrite an original Swin2SR state dict, splitting each fused qkv
    projection into separate query/key/value slices of size ``config.embed_dim``.

    NOTE(review): this block is mangled by an automated rename and does not
    run as written: the two parameters share one name (a SyntaxError), the
    body reads `orig_state_dict`/`config` which are never bound, every result
    is assigned to the throwaway `_a` (so `key_split`, `dim`, `val` and the
    per-slice destination keys are lost), and `UpperCamelCase__` is
    undefined.  Restore from the upstream conversion script before use.
    '''
    for key in orig_state_dict.copy().keys():
        _a = orig_state_dict.pop(UpperCamelCase__ )  # presumably pop(key) — verify
        if "qkv" in key:
            # fused attention projection: split into q/k/v chunks of `dim` rows
            _a = key.split('.' )
            _a = int(key_split[1] )
            _a = int(key_split[4] )
            _a = config.embed_dim
            if "weight" in key:
                _a = val[:dim, :]
                _a = val[dim : dim * 2, :]
                _a = val[-dim:, :]
            else:
                _a = val[:dim]
                _a = val[dim : dim * 2]
                _a = val[-dim:]
            pass
        else:
            # non-qkv keys: presumably stored back under the renamed key — verify
            _a = val
    return orig_state_dict
def _A (lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> Union[str, Any]:
    '''Download a Swin2SR checkpoint, convert it to the transformers layout,
    verify a reference output slice on a sample image, and optionally save
    the model/processor locally and push them to the Hub.

    NOTE(review): mangled beyond execution — the three parameters share one
    name (a SyntaxError), every intermediate is bound to the throwaway `_a`
    while later lines read the intended names (`model`, `unexpected_keys`,
    `checkpoint_url`, `config`, `pixel_values`, `outputs`, `url_to_name`,
    `model_name`, `processor`), and `UpperCamelCase__`, `get_config`,
    `convert_state_dict` are undefined here.  Restore from the upstream
    conversion script before use.
    '''
    _a = get_config(UpperCamelCase__ )
    _a = SwinaSRForImageSuperResolution(UpperCamelCase__ )
    model.eval()
    # load the original checkpoint and remap its keys
    _a = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )
    _a = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
    _a , _a = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
    if len(UpperCamelCase__ ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(UpperCamelCase__ ) )
    # buffers such as relative-position tables are allowed to be absent
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'Unexpected key {key} in state_dict' )
    # verify values
    _a = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    _a = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('RGB' )
    _a = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    _a = 1_26 if 'Jpeg' in checkpoint_url else 2_56
    _a = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    _a = transforms(UpperCamelCase__ ).unsqueeze(0 )
    if config.num_channels == 1:
        # grayscale checkpoints keep a single channel
        _a = pixel_values[:, 0, :, :].unsqueeze(1 )
    _a = model(UpperCamelCase__ )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        _a = torch.Size([1, 3, 5_12, 5_12] )
        _a = torch.tensor(
            [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        _a = torch.Size([1, 3, 10_24, 10_24] )
        _a = torch.tensor(
            [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        _a = torch.Size([1, 3, 10_24, 10_24] )
        _a = torch.tensor(
            [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        _a = torch.Size([1, 3, 5_12, 5_12] )
        _a = torch.tensor(
            [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        _a = torch.Size([1, 3, 10_24, 10_24] )
        _a = torch.tensor(
            [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , UpperCamelCase__ , atol=1E-3 )
    print('Looks ok!' )
    # map download URL -> repo/model name used when saving or pushing
    _a = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    _a = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(UpperCamelCase__ )
        print(f'Saving image processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(UpperCamelCase__ )
    if push_to_hub:
        model.push_to_hub(f'caidas/{model_name}' )
        processor.push_to_hub(f'caidas/{model_name}' )
if __name__ == "__main__":
    # CLI entry point for the Swin2SR conversion script.
    # NOTE(review): the original bound the parser and parsed args to rebound
    # throwaway names, then called the nonexistent `convert_swinasr_checkpoint`;
    # `_A` (the last definition above) is this module's conversion entry point.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    _A(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 363 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import table for GPT-NeoX: submodule name -> public symbols, consumed
# by `_LazyModule` below.
# NOTE(review): the original rebound this structure to throwaway module
# variables, so `_import_structure` (read by `_LazyModule`) was never
# defined — a NameError at import time.
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # in the `else` branch serves them on demand.
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104 | 0 |
from ..utils import DummyObject, requires_backends
class __lowercase(metaclass=DummyObject):
    """Placeholder object raised-on-use when the `note_seq` backend is missing.

    `DummyObject` (imported above but previously unused — the original named an
    undefined `_UpperCAmelCase` metaclass) intercepts attribute access and,
    together with `requires_backends`, produces a helpful ImportError telling
    the user to install `note_seq`.
    """

    # backends DummyObject reports as required
    _backends = ['''note_seq''']

    def __init__(self, *args, **kwargs) -> Any:
        '''Fail construction with a `note_seq`-missing error.'''
        requires_backends(self, ['''note_seq'''])

    @classmethod
    def from_config(cls, *args, **kwargs) -> List[Any]:
        '''Fail the config-based constructor with a `note_seq`-missing error.'''
        requires_backends(cls, ['''note_seq'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs) -> int:
        '''Fail the pretrained constructor with a `note_seq`-missing error.

        NOTE(review): the original defined both classmethods under one name
        (the second silently shadowed the first) and used duplicate `*A_,
        **A_` parameters, which is a SyntaxError; the dummy-object pattern
        exposes `from_config`/`from_pretrained`.
        '''
        requires_backends(cls, ['''note_seq'''])
| 275 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-level RNG shared as the default source of randomness for the dummy
# speech-input helper below, so sequences are reproducible within a process.
_UpperCamelCase = random.Random()
def _lowercase(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    Args:
        shape: (num_rows, num_values_per_row).
        scale: upper bound multiplier for each random value.
        rng: optional `random.Random`; defaults to the module-level RNG above.
        name: unused, kept for signature compatibility.

    NOTE(review): the original declared all four parameters under one name
    (a SyntaxError) and read an undefined `global_rng`; the module RNG is
    bound to `_UpperCamelCase` above.
    """
    if rng is None:
        rng = _UpperCamelCase

    values = []
    for _batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class __lowercase(unittest.TestCase):
    """Builds feature-extractor kwargs and dummy speech inputs for the tests below.

    NOTE(review): the original declared every ``__init__`` parameter under one
    name (a SyntaxError), bound all attributes to a throwaway local, and gave
    both helper methods the same name so the second shadowed the first; the
    names below are restored from the attribute reads and from the
    ``prepare_feat_extract_dict()`` call sites in the test class.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=1_6000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # length step so the batch spans [min_seq_length, max_seq_length]
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Return kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of dummy speech inputs (lists of floats or numpy arrays)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            # `_lowercase` is this module's floats_list helper defined above
            speech_inputs = _lowercase((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(_lowercase((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
    '''Unit tests for `WavaVecaFeatureExtractor` (padding, truncation, dtype).

    NOTE(review): mangled by an automated rename — the base `_UpperCAmelCase`
    is undefined (presumably SequenceFeatureExtractionTestMixin, imported
    above; verify), every test method shares the name `UpperCamelCase__` so
    only the last survives, results are bound to a throwaway local while the
    following lines read the intended names (`feat_extract`, `speech_inputs`,
    `processed`, ...), and `floats_list` / `WavaVecaFeatureExtractionTester`
    are undefined here.  Restore the original method and local names before
    relying on these tests.
    '''
    _UpperCamelCase = WavaVecaFeatureExtractor
    def UpperCamelCase__ ( self ) ->Dict:
        '''Create the shared tester fixture (presumably setUp — verify).'''
        __lowerCAmelCase : List[Any] = WavaVecaFeatureExtractionTester(self )
    def UpperCamelCase__ ( self , A_ ) ->Optional[Any]:
        '''Assert each feature dimension of A_ has ~zero mean and unit variance.'''
        self.assertTrue(np.all(np.mean(A_ , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(A_ , axis=0 ) - 1 ) < 1e-3 ) )
    def UpperCamelCase__ ( self ) ->Tuple:
        '''Feature extraction should match for list and numpy inputs, batched or not.'''
        __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __lowerCAmelCase : Any = [np.asarray(A_ ) for speech_input in speech_inputs]
        # Test not batched input
        __lowerCAmelCase : Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        __lowerCAmelCase : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
        # Test batched
        __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values
        __lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
            self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        __lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        __lowerCAmelCase : List[Any] = np.asarray(A_ )
        __lowerCAmelCase : Any = feat_extract(A_ , return_tensors='''np''' ).input_values
        __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
            self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
    def UpperCamelCase__ ( self ) ->Union[str, Any]:
        '''Zero-mean/unit-variance normalization with each padding strategy (np tensors).'''
        __lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __lowerCAmelCase : str = ['''longest''', '''max_length''', '''do_not_pad''']
        __lowerCAmelCase : str = [None, 1600, None]
        for max_length, padding in zip(A_ , A_ ):
            __lowerCAmelCase : Optional[int] = feat_extract(A_ , padding=A_ , max_length=A_ , return_tensors='''np''' )
            __lowerCAmelCase : Optional[Any] = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def UpperCamelCase__ ( self ) ->Dict:
        '''Same normalization checks without forcing a return tensor type.'''
        __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : Optional[int] = range(800 , 1400 , 200 )
        __lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths]
        __lowerCAmelCase : int = ['''longest''', '''max_length''', '''do_not_pad''']
        __lowerCAmelCase : List[str] = [None, 1600, None]
        for max_length, padding in zip(A_ , A_ ):
            __lowerCAmelCase : Union[str, Any] = feat_extract(A_ , max_length=A_ , padding=A_ )
            __lowerCAmelCase : Union[str, Any] = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def UpperCamelCase__ ( self ) ->Tuple:
        '''Truncation to max_length with `max_length` padding keeps normalization.'''
        __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __lowerCAmelCase : List[str] = feat_extract(
            A_ , truncation=A_ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
        __lowerCAmelCase : int = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
    def UpperCamelCase__ ( self ) ->List[str]:
        '''`longest` padding pads to min(max_length, longest example).'''
        __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __lowerCAmelCase : int = feat_extract(
            A_ , truncation=A_ , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
        __lowerCAmelCase : Optional[Any] = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        __lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        __lowerCAmelCase : Optional[int] = feat_extract(
            A_ , truncation=A_ , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
        __lowerCAmelCase : List[str] = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )
    @require_torch
    def UpperCamelCase__ ( self ) ->Any:
        '''Padding should produce float32 for both numpy and torch tensors.'''
        import torch
        __lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase : Any = np.random.rand(100 ).astype(np.floataa )
        __lowerCAmelCase : List[Any] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            __lowerCAmelCase : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            __lowerCAmelCase : List[str] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    @slow
    @require_torch
    def UpperCamelCase__ ( self ) ->int:
        '''Pretrained checkpoints expose attention_mask only for layer-norm extractors.'''
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            __lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(A_ )
            __lowerCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(A_ )
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 275 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Image preprocessing used by the `a__` helper below: resize to 256x256,
# convert to a CHW float tensor in [0, 1], then rescale values into [-1, 1].
lowerCAmelCase__ = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def a__(image):
    """Normalize *image* into a stacked (N, C, H, W) tensor for the pipeline.

    Accepts a preprocessed tensor (returned unchanged), a single PIL image,
    or a list of PIL images.

    NOTE(review): the original read `image`/`trans` which were never bound
    (its parameter and the transform were renamed away); `lowerCAmelCase__`
    is the module-level torchvision Compose defined above.
    """
    if isinstance(image, torch.Tensor):
        # already a tensor: assume it was preprocessed upstream
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [lowerCAmelCase__(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class _lowerCamelCase ( _lowercase ):
    '''DDIM image-to-image pipeline: noises an input image to an intermediate
    timestep and denoises it back with a UNet + DDIM scheduler.

    NOTE(review): mangled by an automated rename — the base `_lowercase` is
    undefined here (presumably DiffusionPipeline, imported above; verify),
    `__init__` declares both parameters under one name (a SyntaxError), the
    three helpers all share the name `snake_case_` while `__call__` invokes
    `self.check_inputs` / `self.get_timesteps` / `self.prepare_latents`, and
    results are bound to the throwaway `UpperCamelCase` while later lines read
    the intended locals.  Restore the original names before use.
    '''
    def __init__(self , __a , __a ) -> Union[str, Any]:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        UpperCamelCase = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=__a , scheduler=__a )
    def snake_case_ (self , __a ) -> Dict:
        '''Validate that the img2img strength lies in [0.0, 1.0].'''
        if strength < 0 or strength > 1:
            raise ValueError(F"The value of strength should in [0.0, 1.0] but is {strength}" )
    def snake_case_ (self , __a , __a , __a ) -> Dict:
        '''Truncate the scheduler timesteps according to strength.'''
        # get the original timestep using init_timestep
        UpperCamelCase = min(int(num_inference_steps * strength ) , __a )
        UpperCamelCase = max(num_inference_steps - init_timestep , 0 )
        UpperCamelCase = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def snake_case_ (self , __a , __a , __a , __a , __a , __a=None ) -> str:
        '''Noise the encoded image to the starting timestep.'''
        if not isinstance(__a , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__a )}" )
        UpperCamelCase = image.to(device=__a , dtype=__a )
        if isinstance(__a , __a ) and len(__a ) != batch_size:
            raise ValueError(
                F"You have passed a list of generators of length {len(__a )}, but requested an effective batch"
                F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
        UpperCamelCase = init_latents.shape
        UpperCamelCase = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
        # get latents
        print("add noise to latents at timestep" , __a )
        UpperCamelCase = self.scheduler.add_noise(__a , __a , __a )
        UpperCamelCase = init_latents
        return latents
    @torch.no_grad()
    def __call__(self , __a = None , __a = 0.8 , __a = 1 , __a = None , __a = 0.0 , __a = 50 , __a = None , __a = "pil" , __a = True , ) -> Union[ImagePipelineOutput, Tuple]:
        '''Run the full img2img diffusion loop and return decoded images.'''
        self.check_inputs(__a )
        # 2. Preprocess image
        UpperCamelCase = preprocess(__a )
        # 3. set timesteps
        self.scheduler.set_timesteps(__a , device=self.device )
        UpperCamelCase , UpperCamelCase = self.get_timesteps(__a , __a , self.device )
        UpperCamelCase = timesteps[:1].repeat(__a )
        # 4. Prepare latent variables
        UpperCamelCase = self.prepare_latents(__a , __a , __a , self.unet.dtype , self.device , __a )
        UpperCamelCase = latents
        # 5. Denoising loop
        for t in self.progress_bar(__a ):
            # 1. predict noise model_output
            UpperCamelCase = self.unet(__a , __a ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            UpperCamelCase = self.scheduler.step(
                __a , __a , __a , eta=__a , use_clipped_model_output=__a , generator=__a , ).prev_sample
        UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
        UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            UpperCamelCase = self.numpy_to_pil(__a )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=__a )
| 362 |
"""simple docstring"""
from statistics import mean, stdev
def a__(data, ndigits=3):
    """Min-max normalize *data* into [0, 1], rounding to *ndigits* places.

    Raises ZeroDivisionError when all values are equal and ValueError on
    empty input (via min/max), matching the straightforward formula.

    >>> a__([1, 2, 3])
    [0.0, 0.5, 1.0]
    """
    # NOTE(review): the original declared both parameters under one name
    # (a SyntaxError) and discarded min/max into a throwaway local.
    x_min = min(data)
    x_max = max(data)
    # rescale each point into the closed unit interval
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def a__(data, ndigits=3):
    """Z-score standardize *data* (zero mean, unit variance), rounded to *ndigits*.

    >>> a__([1, 2, 3])
    [-1.0, 0.0, 1.0]
    """
    # NOTE(review): the original declared both parameters under one name
    # (a SyntaxError) and discarded mean/stdev into a throwaway local.
    mu = mean(data)
    sigma = stdev(data)
    # standardize each point: (x - mean) / standard deviation
    return [round((x - mu) / (sigma), ndigits) for x in data]
| 244 | 0 |
def _a(number: int, position: int) -> int:
    """Return *number* with the bit at *position* set to 1.

    NOTE(review): the original declared both parameters under one name
    (a SyntaxError); the body's `number`/`position` fix the intent.
    """
    return number | (1 << position)
def _a(number: int, position: int) -> int:
    """Return *number* with the bit at *position* cleared to 0.

    NOTE(review): the original declared both parameters under one name
    (a SyntaxError); the body's `number`/`position` fix the intent.
    """
    return number & ~(1 << position)
def _a(number: int, position: int) -> int:
    """Return *number* with the bit at *position* flipped.

    NOTE(review): the original declared both parameters under one name
    (a SyntaxError); the body's `number`/`position` fix the intent.
    """
    return number ^ (1 << position)
def _a(number: int, position: int) -> bool:
    """Return True iff the bit at *position* of *number* is set.

    NOTE(review): the original declared both parameters under one name
    (a SyntaxError); the body's `number`/`position` fix the intent.
    """
    return ((number >> position) & 1) == 1
def _a(number: int, position: int) -> int:
    """Return the bit (0 or 1) at *position* of *number*.

    NOTE(review): the original declared both parameters under one name
    (a SyntaxError); the body's `number`/`position` fix the intent.
    """
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 146 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
# Module logger (renamed `_lowercase` by an automated refactor); note the
# class below mistakenly names this logger as its base class — verify.
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_(YolosImageProcessor):
    """Deprecated alias of `YolosImageProcessor` that warns on construction.

    NOTE(review): the original inherited from `_lowercase` — the module's
    Logger instance, a TypeError at class creation — while the imported
    `YolosImageProcessor` went unused, and `__init__` declared `*A_, **A_`
    (duplicate name, a SyntaxError) and passed that name as the warning
    category instead of `FutureWarning`.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn callers that this class name is scheduled for removal.
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase :
def __init__( self: Any , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[int]=2 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: int=4 , _lowerCAmelCase: str=2 , _lowerCAmelCase: List[Any]=7 , _lowerCAmelCase: str=True , _lowerCAmelCase: Optional[Any]=True , _lowerCAmelCase: int=True , _lowerCAmelCase: Dict=True , _lowerCAmelCase: Dict=99 , _lowerCAmelCase: List[str]=36 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: int=4 , _lowerCAmelCase: Dict=37 , _lowerCAmelCase: List[str]="gelu" , _lowerCAmelCase: List[str]=0.1 , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: Union[str, Any]=5_12 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: List[Any]=2 , _lowerCAmelCase: Dict=0.02 , _lowerCAmelCase: Optional[Any]=6 , _lowerCAmelCase: Dict=6 , _lowerCAmelCase: str=3 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: Optional[int]=None , _lowerCAmelCase: Optional[int]=10_00 , ):
lowercase :List[str] = parent
lowercase :Optional[int] = batch_size
lowercase :int = num_channels
lowercase :Optional[Any] = image_size
lowercase :str = patch_size
lowercase :Tuple = is_training
lowercase :Union[str, Any] = use_input_mask
lowercase :int = use_token_type_ids
lowercase :Union[str, Any] = use_labels
lowercase :List[Any] = vocab_size
lowercase :Optional[Any] = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Optional[Any] = num_attention_heads
lowercase :List[str] = intermediate_size
lowercase :Optional[Any] = hidden_act
lowercase :List[str] = hidden_dropout_prob
lowercase :Any = attention_probs_dropout_prob
lowercase :Any = max_position_embeddings
lowercase :Optional[Any] = type_vocab_size
lowercase :str = type_sequence_label_size
lowercase :List[str] = initializer_range
lowercase :Any = coordinate_size
lowercase :Dict = shape_size
lowercase :List[str] = num_labels
lowercase :Any = num_choices
lowercase :Tuple = scope
lowercase :Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowercase :Optional[int] = text_seq_length
lowercase :Any = (image_size // patch_size) ** 2 + 1
lowercase :Union[str, Any] = self.text_seq_length + self.image_seq_length
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowercase :str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
lowercase :List[str] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase :List[str] = bbox[i, j, 3]
lowercase :Optional[int] = bbox[i, j, 1]
lowercase :Optional[int] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase :str = bbox[i, j, 2]
lowercase :Union[str, Any] = bbox[i, j, 0]
lowercase :Optional[Any] = tmp_coordinate
lowercase :str = tf.constant(_lowerCAmelCase )
lowercase :Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase :str = None
if self.use_input_mask:
lowercase :Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
lowercase :Dict = None
if self.use_token_type_ids:
lowercase :int = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowercase :Optional[int] = None
lowercase :Any = None
if self.use_labels:
lowercase :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase :List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowercase :Tuple = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: List[str] , _lowerCAmelCase: Any , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any] ):
lowercase :List[str] = TFLayoutLMvaModel(config=_lowerCAmelCase )
# text + image
lowercase :int = model(_lowerCAmelCase , pixel_values=_lowerCAmelCase , training=_lowerCAmelCase )
lowercase :List[Any] = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , training=_lowerCAmelCase , )
lowercase :Optional[int] = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowercase :Any = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowercase :Union[str, Any] = model({"pixel_values": pixel_values} , training=_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str , _lowerCAmelCase: List[str] , _lowerCAmelCase: int ):
lowercase :str = self.num_labels
lowercase :Union[str, Any] = TFLayoutLMvaForSequenceClassification(config=_lowerCAmelCase )
lowercase :List[str] = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Any , _lowerCAmelCase: str , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Tuple ):
lowercase :Any = self.num_labels
lowercase :str = TFLayoutLMvaForTokenClassification(config=_lowerCAmelCase )
lowercase :Any = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any] ):
lowercase :Union[str, Any] = 2
lowercase :List[Any] = TFLayoutLMvaForQuestionAnswering(config=_lowerCAmelCase )
lowercase :int = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , training=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :Tuple = self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) :Any = config_and_inputs
lowercase :Dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
_a = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_a = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_a = False
_a = False
_a = False
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: str , _lowerCAmelCase: int , _lowerCAmelCase: int , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: List[Any] ):
return True
def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any]=False ):
lowercase :List[Any] = copy.deepcopy(_lowerCAmelCase )
if model_class in get_values(_lowerCAmelCase ):
lowercase :int = {
k: tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_lowerCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
lowercase :int = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowerCAmelCase ):
lowercase :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
lowercase :Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowerCAmelCase ):
lowercase :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_lowerCAmelCase ):
lowercase :Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :int = TFLayoutLMvaModelTester(self )
lowercase :str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase , lowercase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :Dict = model_class(_lowerCAmelCase )
if getattr(_lowerCAmelCase , "hf_compute_loss" , _lowerCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
lowercase :int = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
lowercase :Dict = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_lowerCAmelCase )[0]
]
lowercase :Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowercase :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
lowercase :str = prepared_for_class.pop("input_ids" )
lowercase :int = model(_lowerCAmelCase , **_lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowercase :str = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
lowercase :Optional[Any] = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
lowercase :Union[str, Any] = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowercase :str = -1_00
lowercase :List[str] = tf.convert_to_tensor(_lowerCAmelCase )
lowercase :Union[str, Any] = model(_lowerCAmelCase , **_lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowercase :str = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
lowercase :int = model(_lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowercase :List[Any] = self._prepare_for_class(inputs_dict.copy() , _lowerCAmelCase , return_labels=_lowerCAmelCase )
# Get keys that were added with the _prepare_for_class function
lowercase :str = prepared_for_class.keys() - inputs_dict.keys()
lowercase :Any = inspect.signature(model.call ).parameters
lowercase :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowercase :Optional[int] = {0: "input_ids"}
for label_key in label_keys:
lowercase :Optional[int] = signature_names.index(_lowerCAmelCase )
lowercase :Optional[Any] = label_key
lowercase :List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowercase :Optional[int] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowercase :Optional[Any] = prepared_for_class[value]
lowercase :Union[str, Any] = tuple(_lowerCAmelCase )
# Send to model
lowercase :int = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def SCREAMING_SNAKE_CASE ( self: Any ):
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Dict ):
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) :Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase :List[Any] = type
self.model_tester.create_and_check_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: List[str] ):
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str ):
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self: str ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase :Any = TFLayoutLMvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def UpperCAmelCase__ ( ):
lowercase :List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class __lowerCAmelCase ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self: List[str] ):
return LayoutLMvaImageProcessor(apply_ocr=_lowerCAmelCase ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase :Optional[int] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
lowercase :Optional[int] = self.default_image_processor
lowercase :Optional[int] = prepare_img()
lowercase :int = image_processor(images=_lowerCAmelCase , return_tensors="tf" ).pixel_values
lowercase :Optional[Any] = tf.constant([[1, 2]] )
lowercase :Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
lowercase :Optional[int] = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
lowercase :Optional[int] = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , _lowerCAmelCase )
lowercase :Optional[int] = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 158 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : str = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
_UpperCAmelCase : List[str] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : Optional[Any] = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def UpperCAmelCase__ ( ):
lowercase :int = (
list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
)
lowercase :Dict = bs[:]
lowercase :List[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase )
cs.append(2**8 + n )
n += 1
lowercase :List[str] = [chr(lowerCamelCase ) for n in cs]
return dict(zip(lowerCamelCase, lowerCamelCase ) )
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :List[Any] = set()
lowercase :Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase :List[str] = char
return pairs
class __lowerCAmelCase ( lowerCAmelCase):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['''input_ids''', '''attention_mask''']
def __init__( self: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Tuple , _lowerCAmelCase: Union[str, Any]="replace" , _lowerCAmelCase: int="<s>" , _lowerCAmelCase: int="</s>" , _lowerCAmelCase: int="</s>" , _lowerCAmelCase: Optional[int]="<s>" , _lowerCAmelCase: Optional[int]="<unk>" , _lowerCAmelCase: Any="<pad>" , _lowerCAmelCase: Optional[Any]="<mask>" , _lowerCAmelCase: Union[str, Any]=False , **_lowerCAmelCase: Dict , ):
lowercase :Union[str, Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token
lowercase :List[str] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token
lowercase :Any = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token
lowercase :Tuple = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token
lowercase :List[Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
lowercase :str = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase :Tuple = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
with open(_lowerCAmelCase , encoding="utf-8" ) as vocab_handle:
lowercase :List[str] = json.load(_lowerCAmelCase )
lowercase :Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowercase :Dict = errors # how to handle errors in decoding
lowercase :Any = bytes_to_unicode()
lowercase :str = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCAmelCase , encoding="utf-8" ) as merges_handle:
lowercase :List[Any] = merges_handle.read().split("\n" )[1:-1]
lowercase :Tuple = [tuple(merge.split() ) for merge in bpe_merges]
lowercase :Optional[int] = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowercase :Tuple = {}
lowercase :List[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase :Optional[int] = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def SCREAMING_SNAKE_CASE ( self: int ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self: Dict ):
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: Optional[int] ):
if token in self.cache:
return self.cache[token]
lowercase :Tuple = tuple(_lowerCAmelCase )
lowercase :List[str] = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
lowercase :List[str] = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase :List[Any] = bigram
lowercase :str = []
lowercase :Tuple = 0
while i < len(_lowerCAmelCase ):
try:
lowercase :List[str] = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase :Optional[Any] = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase :Dict = tuple(_lowerCAmelCase )
lowercase :Optional[Any] = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
lowercase :List[str] = get_pairs(_lowerCAmelCase )
lowercase :str = " ".join(_lowerCAmelCase )
lowercase :Optional[Any] = word
return word
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Optional[int] ):
lowercase :str = []
for token in re.findall(self.pat , _lowerCAmelCase ):
lowercase :str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(" " ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: str ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: Tuple ):
return self.decoder.get(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: List[str] ):
lowercase :Optional[int] = "".join(_lowerCAmelCase )
lowercase :List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase :List[str] = os.path.join(
_lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase :Optional[int] = os.path.join(
_lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + "\n" )
lowercase :Tuple = 0
with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
lowercase :Tuple = token_index
writer.write(" ".join(_lowerCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase :Tuple = [self.cls_token_id]
lowercase :int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None , _lowerCAmelCase: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None ):
lowercase :List[str] = [self.sep_token_id]
lowercase :Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: int=False , **_lowerCAmelCase: Dict ):
lowercase :Tuple = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()):
lowercase :List[Any] = " " + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: Union[Dict[str, EncodedInput], BatchEncoding] , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[bool] = None , ):
lowercase :Tuple = super()._pad(
encoded_inputs=_lowerCAmelCase , max_length=_lowerCAmelCase , padding_strategy=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase :Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase :Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase :Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(_lowerCAmelCase )
if needs_to_be_padded:
lowercase :Optional[int] = len(_lowerCAmelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase :List[Any] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowercase :int = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 158 | 1 |
"""simple docstring"""
def lowercase ( __snake_case : int = 1_0_0_0 ):
lowercase_ , lowercase_ : str = 1, 1
lowercase_ : List[str] = 2
while True:
lowercase_ : Tuple = 0
lowercase_ : List[Any] = fa + fa
lowercase_ , lowercase_ : Optional[int] = fa, f
index += 1
for _ in str(__snake_case ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 33 |
"""simple docstring"""
def lowercase ( __snake_case : int = 1_0_0_0 ):
lowercase_ , lowercase_ : str = 1, 1
lowercase_ : List[str] = 2
while True:
lowercase_ : Tuple = 0
lowercase_ : List[Any] = fa + fa
lowercase_ , lowercase_ : Optional[int] = fa, f
index += 1
for _ in str(__snake_case ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 33 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
__lowerCamelCase , __lowerCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=lowerCamelCase__ , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
__lowerCamelCase = controlnet_params
__lowerCamelCase = 'bird'
__lowerCamelCase = jax.device_count()
__lowerCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
__lowerCamelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
__lowerCamelCase = jax.random.PRNGKey(0 )
__lowerCamelCase = jax.random.split(lowerCamelCase__ , jax.device_count() )
__lowerCamelCase = replicate(lowerCamelCase__ )
__lowerCamelCase = shard(lowerCamelCase__ )
__lowerCamelCase = shard(lowerCamelCase__ )
__lowerCamelCase = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=50 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowerCamelCase = images[0, 253:256, 253:256, -1]
__lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowerCamelCase = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
__lowerCamelCase , __lowerCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=lowerCamelCase__ , from_pt=lowerCamelCase__ , dtype=jnp.bfloataa )
__lowerCamelCase = controlnet_params
__lowerCamelCase = 'Chef in the kitchen'
__lowerCamelCase = jax.device_count()
__lowerCamelCase = pipe.prepare_text_inputs([prompts] * num_samples )
__lowerCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
__lowerCamelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
__lowerCamelCase = jax.random.PRNGKey(0 )
__lowerCamelCase = jax.random.split(lowerCamelCase__ , jax.device_count() )
__lowerCamelCase = replicate(lowerCamelCase__ )
__lowerCamelCase = shard(lowerCamelCase__ )
__lowerCamelCase = shard(lowerCamelCase__ )
__lowerCamelCase = pipe(
prompt_ids=lowerCamelCase__ , image=lowerCamelCase__ , params=lowerCamelCase__ , prng_seed=lowerCamelCase__ , num_inference_steps=50 , jit=lowerCamelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowerCamelCase = images[0, 253:256, 253:256, -1]
__lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowerCamelCase = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 348 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
    """
    Builds a deliberately tiny Mask2Former configuration plus random dummy
    inputs, and implements the shared forward-pass checks used by the model
    test-suite below.

    NOTE(review): in the source chunk every helper method was defined under the
    single placeholder name ``lowercase_`` (so later definitions shadowed the
    earlier ones) and every parameter shared the duplicate name
    ``lowerCamelCase__`` (a SyntaxError).  Method and parameter names below are
    restored from their call sites (``self.get_config()``,
    ``self.prepare_config_and_inputs()``,
    ``model_tester.prepare_config_and_inputs_for_common()``, ...) and from the
    ``self.*`` attributes the methods read.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        # The original assigned ``hidden_dim`` twice in a row; upstream stores
        # it a second time as the mask feature size.
        # TODO(review): confirm the second attribute name against upstream.
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, pixel_mask, mask_labels, class_labels)`` random dummies."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        # Random binary masks/labels; ``> 0.5`` activates roughly half the entries.
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """Build a tiny ``MaskaFormerConfig`` sized for fast tests."""
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        # check_output_hidden_state() compares against config.backbone_config.depths,
        # so configure a four-stage backbone with one layer per stage.
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        # TODO(review): the assignment targets below were lost to placeholder
        # names in the source; attribute names restored from the upstream test.
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the layout the common tests expect."""
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        """Verify that all three hidden-state tuples have the expected lengths."""
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        # Fixed: the original used assertTrue(len(a), len(b)), which treats the
        # second value as a failure *message* and passes for any non-empty tuple.
        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        """Run the base model forward (keyword and positional form) and check output shapes."""
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            # Request hidden states so check_output_hidden_state() can inspect them.
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        """Run the universal-segmentation head with and without labels and check shapes/loss."""
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Common Mask2Former model tests: forward signature, hidden states,
    attention outputs, loss computation and backward passes.

    NOTE(review): this chunk is machine-degraded and not runnable as-is —
    * the two mixin base classes were replaced by the undefined name
      ``__magic_name__`` (the file imports ``ModelTesterMixin`` and
      ``PipelineTesterMixin``, the evident candidates — confirm);
    * all class attributes share the name ``snake_case_`` and all test methods
      share the name ``lowercase_``, so later definitions shadow earlier ones;
    * locals are bound to ``__lowerCamelCase`` while later lines read the
      original variable names (``model``, ``outputs``, ``loss``, ...), and
      many call arguments were replaced by the placeholder ``lowerCamelCase__``.
    Restore the upstream names before relying on these tests.
    """

    snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
    # Four distinct boolean feature flags were collapsed onto one name here.
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False
    snake_case_ = False

    def lowercase_ ( self ) -> List[str]:
        '''Set up the shared model tester and the config tester.'''
        __lowerCamelCase = MaskaFormerModelTester(self )
        __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )

    def lowercase_ ( self ) -> Tuple:
        '''Run the generic config round-trip tests.'''
        self.config_tester.run_common_tests()

    def lowercase_ ( self ) -> List[str]:
        '''Forward pass of the base model on the common inputs.'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )

    def lowercase_ ( self ) -> Optional[Any]:
        '''Forward pass of the universal-segmentation head.'''
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )

    @unittest.skip(reason='Mask2Former does not use inputs_embeds' )
    def lowercase_ ( self ) -> Any:
        '''Skipped: model takes no inputs_embeds.'''
        pass

    @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
    def lowercase_ ( self ) -> Tuple:
        '''Skipped: no input-embedding accessor.'''
        pass

    @unittest.skip(reason='Mask2Former is not a generative model' )
    def lowercase_ ( self ) -> Optional[Any]:
        '''Skipped: not a generative model.'''
        pass

    @unittest.skip(reason='Mask2Former does not use token embeddings' )
    def lowercase_ ( self ) -> Optional[int]:
        '''Skipped: no token embeddings to resize.'''
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def lowercase_ ( self ) -> Dict:
        '''Skipped on multi-GPU: incompatible with nn.DataParallel.'''
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowercase_ ( self ) -> List[str]:
        '''Temporarily skipped (model too large for the common tests).'''
        pass

    def lowercase_ ( self ) -> List[str]:
        '''The first forward argument must be ``pixel_values`` for every model class.'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase = model_class(lowerCamelCase__ )
            __lowerCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCamelCase = [*signature.parameters.keys()]
            __lowerCamelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowerCamelCase__ )

    @slow
    def lowercase_ ( self ) -> int:
        '''Smoke-test loading a published checkpoint from the Hub.'''
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            __lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )

    def lowercase_ ( self ) -> Optional[Any]:
        '''A loss is produced when mask and class labels are supplied.'''
        __lowerCamelCase = (self.model_tester.min_size,) * 2
        __lowerCamelCase = {
            'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
            'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
            'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
        }
        __lowerCamelCase = self.model_tester.get_config()
        __lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
        __lowerCamelCase = model(**lowerCamelCase__ )
        self.assertTrue(outputs.loss is not None )

    def lowercase_ ( self ) -> Dict:
        '''Forward pass with hidden-state outputs enabled.'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )

    def lowercase_ ( self ) -> Optional[Any]:
        '''Attention maps are returned when output_attentions is requested.'''
        __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
            __lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
            self.assertTrue(outputs.attentions is not None )

    def lowercase_ ( self ) -> Optional[Any]:
        '''Training mode: the loss backpropagates without error.'''
        if not self.model_tester.is_training:
            return
        # Index 1 is the segmentation-head class (the only one that yields a loss).
        __lowerCamelCase = self.all_model_classes[1]
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        __lowerCamelCase = model_class(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.train()
        __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
        loss.backward()

    def lowercase_ ( self ) -> Dict:
        '''Hidden states and attentions retain gradients through backward.'''
        __lowerCamelCase = self.all_model_classes[1]
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
        # NOTE(review): the two assignments below presumably enabled
        # config.output_hidden_states / config.output_attentions; the targets
        # were lost in the degradation — confirm against upstream.
        __lowerCamelCase = True
        __lowerCamelCase = True
        __lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
        model.train()
        __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
        __lowerCamelCase = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        __lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        __lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        __lowerCamelCase = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=lowerCamelCase__ )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
__A = 1e-4  # absolute tolerance used by the torch.allclose checks in the integration tests below
def lowerCamelCase_ ( ) -> List[Any]:
    """Load the COCO test-fixture image used by the slow integration tests.

    Fixed: the original bound the opened image to a placeholder variable but
    returned the undefined name ``image``, raising NameError on every call.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests that run a published Mask2Former checkpoint on a
    real COCO image and compare slices of the outputs against recorded values.

    NOTE(review): degraded chunk — every method is named ``lowercase_`` (later
    definitions shadow earlier ones), the two cached properties are read under
    their original names (``self.model_checkpoints``,
    ``self.default_image_processor``), and device / tolerance arguments were
    replaced by the placeholder ``lowerCamelCase__`` (presumably
    ``torch_device`` and the module-level ``1e-4`` tolerance — confirm).
    """

    @cached_property
    def lowercase_ ( self ) -> List[str]:
        '''Checkpoint id used by all integration tests below.'''
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def lowercase_ ( self ) -> Dict:
        '''Image processor matching the checkpoint (None without vision deps).'''
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    def lowercase_ ( self ) -> Any:
        '''Base model inference: compare last-hidden-state slices to recorded values.'''
        __lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
        __lowerCamelCase = self.default_image_processor
        __lowerCamelCase = prepare_img()
        __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
        __lowerCamelCase = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
        with torch.no_grad():
            __lowerCamelCase = model(**lowerCamelCase__ )
        # Recorded 3x3 slice of the encoder's last hidden state.
        __lowerCamelCase = torch.tensor(
            [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
        # Recorded 3x3 slice of the pixel decoder's last hidden state.
        __lowerCamelCase = torch.tensor(
            [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
        # Recorded 3x3 slice of the transformer decoder's last hidden state.
        __lowerCamelCase = torch.tensor(
            [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )

    def lowercase_ ( self ) -> Union[str, Any]:
        '''Segmentation-head inference: check logits shapes and recorded slices.'''
        __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
        __lowerCamelCase = self.default_image_processor
        __lowerCamelCase = prepare_img()
        __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
        __lowerCamelCase = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
        with torch.no_grad():
            __lowerCamelCase = model(**lowerCamelCase__ )
        # masks_queries_logits
        __lowerCamelCase = outputs.masks_queries_logits
        # Masks are predicted at 1/4 of the input's spatial resolution.
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        __lowerCamelCase = [
            [-8.78_39, -9.00_56, -8.81_21],
            [-7.41_04, -7.03_13, -6.54_01],
            [-6.61_05, -6.34_27, -6.46_75],
        ]
        __lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
        # class_queries_logits
        __lowerCamelCase = outputs.class_queries_logits
        # num_labels + 1 accounts for the "no object" (null) class.
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        __lowerCamelCase = torch.tensor(
            [
                [1.83_24, -8.08_35, -4.19_22],
                [0.84_50, -9.00_50, -3.60_53],
                [0.30_45, -7.72_93, -3.02_75],
            ] ).to(lowerCamelCase__ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )

    def lowercase_ ( self ) -> str:
        '''Batched inference with segmentation maps produces a loss.'''
        __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
        __lowerCamelCase = self.default_image_processor
        __lowerCamelCase = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
        __lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
        __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
        __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
        with torch.no_grad():
            __lowerCamelCase = model(**lowerCamelCase__ )
        self.assertTrue(outputs.loss is not None )
| 348 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
A__ : List[str] = logging.get_logger(__name__)  # module-level logger


@dataclass
class __snake_case ( SCREAMING_SNAKE_CASE_ ):
    """PyTorch-specific benchmark arguments (device selection, torchscript
    tracing, fp16 opt level) layered on top of the generic benchmark args.

    NOTE(review): degraded chunk — the base class and several ``field``
    defaults reference the undefined name ``SCREAMING_SNAKE_CASE_``
    (``BenchmarkArguments`` is the only candidate imported above); the list
    and all three dataclass fields are bound to the single name ``_a`` (later
    definitions shadow earlier ones); ``__init__`` names its kwargs ``A_``
    while the body reads ``kwargs``; and locals are bound to
    ``lowerCAmelCase_`` while later lines read the original names
    (``positive_arg``, ``device``, ``n_gpu``).
    """

    # Originally the list of deprecated "--no_*" CLI flags handled in __init__.
    _a = [
        '''no_inference''',
        '''no_cuda''',
        '''no_tpu''',
        '''no_speed''',
        '''no_memory''',
        '''no_env_print''',
        '''no_multi_process''',
    ]

    def __init__( self : List[str] , **A_ : str):
        """Translate deprecated ``no_X`` kwargs into their positive ``X``
        form, then pop the PyTorch-only options before delegating to the
        base-class initializer."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # Strip the "no_" prefix and store the negated value.
                lowerCAmelCase_ : Optional[Any] = deprecated_arg[3:]
                setattr(self , lowercase_ , not kwargs.pop(lowercase_))
                logger.warning(
                    F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""")
        lowerCAmelCase_ : Optional[int] = kwargs.pop('''torchscript''' , self.torchscript)
        lowerCAmelCase_ : Any = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics)
        lowerCAmelCase_ : Tuple = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level)
        super().__init__(**lowercase_)

    _a = field(default=SCREAMING_SNAKE_CASE_ ,metadata={'''help''': '''Trace the models using torchscript'''} )
    _a = field(default=SCREAMING_SNAKE_CASE_ ,metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
    _a = field(
        default='''O1''' ,metadata={
            '''help''': (
                '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
                '''See details at https://nvidia.github.io/apex/amp.html'''
            )
        } ,)

    @cached_property
    def UpperCAmelCase__ ( self : Any):
        """Resolve the torch device and GPU count (CPU / TPU / CUDA)."""
        requires_backends(self , ['''torch'''])
        logger.info('''PyTorch: setting up devices''')
        if not self.cuda:
            lowerCAmelCase_ : Union[str, Any] = torch.device('''cpu''')
            lowerCAmelCase_ : str = 0
        elif is_torch_tpu_available():
            lowerCAmelCase_ : List[str] = xm.xla_device()
            lowerCAmelCase_ : List[Any] = 0
        else:
            lowerCAmelCase_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
            lowerCAmelCase_ : Optional[int] = torch.cuda.device_count()
        return device, n_gpu

    @property
    def UpperCAmelCase__ ( self : Tuple):
        """Whether a TPU is both available and enabled."""
        return is_torch_tpu_available() and self.tpu

    @property
    def UpperCAmelCase__ ( self : List[str]):
        """Index of the active CUDA device."""
        requires_backends(self , ['''torch'''])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def UpperCAmelCase__ ( self : Tuple):
        """The resolved torch.device (first element of _setup_devices)."""
        requires_backends(self , ['''torch'''])
        return self._setup_devices[0]

    @property
    def UpperCAmelCase__ ( self : Dict):
        """Number of GPUs detected (second element of _setup_devices)."""
        requires_backends(self , ['''torch'''])
        return self._setup_devices[1]

    @property
    def UpperCAmelCase__ ( self : Optional[int]):
        """True when at least one GPU is available."""
        return self.n_gpu > 0
| 103 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
    """Shared mixin for DeepFloyd-IF pipeline tests: builds tiny dummy
    components (stage-1 and super-resolution variants) and checks that
    save/load round-trips reproduce the pipeline output.

    NOTE(review): degraded chunk — all four methods share the name
    ``UpperCAmelCase__`` (later definitions shadow earlier ones; the return
    dicts suggest the first two were the upstream ``get_dummy_components`` /
    ``get_superresolution_dummy_components``), locals are bound to
    ``UpperCAmelCase`` while later lines read the original names, several
    keyword values were replaced by the undefined placeholder ``lowercase_``
    (e.g. ``thresholding=lowercase_``), and ``self.pipeline_class`` /
    ``self.get_dummy_inputs`` are expected from the concrete test class that
    mixes this in.
    """

    def UpperCAmelCase__ ( self :Any ) -> List[str]:
        '''Build the tiny stage-1 IF components (text encoder, tokenizer, unet, scheduler, watermarker).'''
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase__ ( self :List[Any] ) -> Any:
        '''Build the tiny super-resolution IF components (6-channel unet plus an image-noising scheduler).'''
        torch.manual_seed(0 )
        UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        UpperCAmelCase = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        UpperCAmelCase = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        UpperCAmelCase = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase__ ( self :List[str] ) -> str:
        '''Save/load round-trip with all optional components set to None and the prompt pre-encoded.'''
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['prompt']
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        if "image" in inputs:
            UpperCAmelCase = inputs['image']
        else:
            UpperCAmelCase = None
        if "mask_image" in inputs:
            UpperCAmelCase = inputs['mask_image']
        else:
            UpperCAmelCase = None
        if "original_image" in inputs:
            UpperCAmelCase = inputs['original_image']
        else:
            UpperCAmelCase = None
        UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(lowercase_ )
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(lowercase_ , lowercase_ , lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = inputs['generator']
        UpperCAmelCase = inputs['num_inference_steps']
        UpperCAmelCase = inputs['output_type']
        # inputs with prompt converted to embeddings
        UpperCAmelCase = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            UpperCAmelCase = image
        if mask_image is not None:
            UpperCAmelCase = mask_image
        if original_image is not None:
            UpperCAmelCase = original_image
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        # Outputs must match to within 1e-4 after the save/load round-trip.
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )

    def UpperCAmelCase__ ( self :List[Any] ) -> str:
        '''Plain save/load round-trip: outputs before and after must match.'''
        UpperCAmelCase = self.get_dummy_components()
        UpperCAmelCase = self.pipeline_class(**lowercase_ )
        pipe.to(lowercase_ )
        pipe.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe(**lowercase_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(lowercase_ )
            UpperCAmelCase = self.pipeline_class.from_pretrained(lowercase_ )
        pipe_loaded.to(lowercase_ )
        pipe_loaded.set_progress_bar_config(disable=lowercase_ )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        UpperCAmelCase = self.get_dummy_inputs(lowercase_ )
        UpperCAmelCase = pipe_loaded(**lowercase_ )[0]
        UpperCAmelCase = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
        self.assertLess(lowercase_ , 1E-4 )
| 78 | 0 |
import numpy as np
import skfuzzy as fuzz
# Demo of classic fuzzy-set operations (union, intersection, complement,
# difference, algebraic/bounded sum and product) using scikit-fuzzy,
# plotted with matplotlib.
#
# NOTE(review): degraded chunk — every result is bound to the placeholder
# ``__lowerCAmelCase`` while later lines read the original names
# (``X``, ``abca``, ``young``, ``middle_aged``, ``union``, ...), and the two
# triangle parameter lists were both bound to the same name, so the second
# assignment overwrites the first and both membership functions end up
# identical. Restore distinct names before running.
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    __lowerCAmelCase : List[str] =np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    __lowerCAmelCase : Tuple =[0, 25, 50]
    __lowerCAmelCase : Any =[25, 50, 75]
    __lowerCAmelCase : List[Any] =fuzz.membership.trimf(X, abca)
    __lowerCAmelCase : Optional[Any] =fuzz.membership.trimf(X, abca)

    # Compute the different operations using inbuilt functions.
    __lowerCAmelCase : Tuple =np.ones(75)
    __lowerCAmelCase : Dict =np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    __lowerCAmelCase : List[str] =fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    __lowerCAmelCase : List[str] =fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    __lowerCAmelCase : Union[str, Any] =fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    __lowerCAmelCase : Tuple =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    __lowerCAmelCase : int =young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    __lowerCAmelCase : List[Any] =young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    __lowerCAmelCase : str =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    __lowerCAmelCase : Union[str, Any] =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 358 |
'''simple docstring'''
def UpperCamelCase ( _lowerCamelCase : int ) -> int:
    """Return the ``_lowerCamelCase``-th Catalan number (1-indexed, so
    ``UpperCamelCase(1) == 1`` and ``UpperCamelCase(5) == 14``).

    Uses the exact integer recurrence C(i) = C(i-1) * (4*i - 2) // (i + 1).

    :param _lowerCamelCase: 1-indexed position; must be an ``int`` >= 1.
    :raises TypeError: if the argument is not an ``int``.
    :raises ValueError: if the argument is < 1.

    >>> UpperCamelCase(1)
    1
    >>> UpperCamelCase(5)
    14

    Fixes over the original:
    * ``isinstance(number, number)`` raised TypeError on *every* call — the
      intended check is against ``int``;
    * the error messages interpolated the undefined name ``number`` and the
      raises passed the raw value instead of the built message;
    * the running product was stored in one variable but multiplied into the
      undefined name ``current_number``.
    """
    if not isinstance(_lowerCamelCase, int):
        msg = f"Input value of [number={_lowerCamelCase}] must be an integer"
        raise TypeError(msg)
    if _lowerCamelCase < 1:
        msg = f"Input value of [number={_lowerCamelCase}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, _lowerCamelCase):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 123 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _a ( UpperCamelCase__ ):
    """Unconditional image-generation pipeline that samples with DDIM.

    NOTE(review): degraded chunk — the base class is the undefined placeholder
    ``UpperCamelCase__`` (``DiffusionPipeline`` is imported above and is the
    evident base — confirm); the same placeholder is reused for *duplicate*
    parameter names in both method signatures (a SyntaxError as written); and
    locals are bound to ``lowercase__`` while later lines read the original
    names (``image_shape``, ``image``, ``scheduler``).
    """

    def __init__( self: str , UpperCamelCase_: str , UpperCamelCase_: int ) -> Optional[int]:
        """Register the UNet and a scheduler (coerced to DDIM) on the pipeline."""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        lowercase__ = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )

    @torch.no_grad()
    def __call__( self: int , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: float = 0.0 , UpperCamelCase_: int = 50 , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate ``batch_size`` images by iterative DDIM denoising of random noise."""
        # Sample-size may be a single int or an (h, w) pair on the UNet config.
        if isinstance(self.unet.config.sample_size , UpperCamelCase_ ):
            lowercase__ = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            lowercase__ = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )

        lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(UpperCamelCase_ )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            lowercase__ = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            lowercase__ = self.scheduler.step(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample

        # Map from [-1, 1] to [0, 1] and move to numpy HWC layout.
        lowercase__ = (image / 2 + 0.5).clamp(0 , 1 )
        lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            lowercase__ = self.numpy_to_pil(UpperCamelCase_ )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=UpperCamelCase_ )
| 110 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)  # module-level logger
# NOTE(review): degraded chunk — the five module constants below are all bound
# to the single name ``__A``, so each assignment overwrites the previous one,
# while the tokenizer class further down references them under their original
# names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, FAIRSEQ_LANGUAGE_CODES).
# Filenames of the local vocab / tokenizer artifacts.
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
# Map from checkpoint id to the remote vocab / tokenizer file URLs.
__A ={
    '''vocab_file''': {
        '''facebook/mbart-large-en-ro''': (
            '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
        ),
        '''facebook/mbart-large-cc25''': (
            '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
        '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
    },
}
# Maximum positional-embedding length per checkpoint.
__A ={
    '''facebook/mbart-large-en-ro''': 1_0_2_4,
    '''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
# The 25 fairseq language codes supported by mBART (used as special tokens).
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast ):
    """
    Fast mBART tokenizer (backed by HuggingFace's *tokenizers* library).

    On top of a generic fast BPE tokenizer this class adds mBART's language-code
    handling: encoded sequences carry no prefix and a ``</s> <lang_code>`` suffix —
    the source-language code when encoding inputs, the target-language code when
    encoding labels.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    # Token ids wrapped around every encoded sequence; refreshed whenever the
    # active source/target language changes.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Without the sentencepiece model file the slow tokenizer cannot be re-created.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """The currently active source-language code (e.g. ``"en_XX"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Wrap one (or two) token-id sequences with the mBART prefix/suffix tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """mBART does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Encode ``raw_inputs`` for generation, forcing the decoder to start with the target language."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs
    ) -> BatchEncoding:
        """Tokenize source (and optionally target) texts after activating the language pair."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens for source mode: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
        """Reset special tokens for target mode: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory`` so the slow tokenizer can load it."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 19 | 0 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """Write a small well-formed CSV file and return its path as a string."""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """Write a CSV file with a ragged row (extra trailing comma) and return its path."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """Write a one-row CSV whose single column holds an image path; return the CSV path."""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """Write a CSV with a single string-label column; return its path."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """Write a CSV whose single column holds space-separated integers; return its path."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """Reading a malformed CSV must raise and log an ERROR naming the bad file."""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    # The failing file (and only a failure, not the good file) should be logged.
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """A column declared as `Image` must be read as image structs, not plain strings."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """A column declared as `ClassLabel` must be converted from label strings to ids."""
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    # str2int maps each label string to its integer id.
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """A converter can split a space-separated cell into a list of ints."""
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 128 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _a ( unittest.TestCase ):
    """End-to-end tests for `ChineseCLIPProcessor` (BERT tokenizer + CLIP image processor)."""

    def setUp(self):
        """Create a temp dir holding a tiny Chinese vocab and an image-processor config."""
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow BERT tokenizer loaded from the temp dir."""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast BERT tokenizer loaded from the temp dir."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """ChineseCLIP image processor loaded from the temp dir."""
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-last, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 128 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level constants below are bound to the *same* name,
# so the second assignment silently overwrites the first (the logger is lost).
# Presumably these were meant to be `logger` and a pretrained-config archive
# map — confirm against callers before renaming.
__UpperCAmelCase = logging.get_logger(__name__)

# Checkpoint name -> URL of its configuration file.
__UpperCAmelCase = {
    'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
    'distilbert-base-uncased-distilled-squad': (
        'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
    ),
    'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
    'distilbert-base-cased-distilled-squad': (
        'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
    ),
    'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
    'distilbert-base-multilingual-cased': (
        'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
    ),
    'distilbert-base-uncased-finetuned-sst-2-english': (
        'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
    ),
}
class lowerCamelCase (PretrainedConfig ):
    """
    Configuration class for DistilBERT models.

    Stores the hyper-parameters that define a DistilBERT architecture; defaults
    reproduce the `distilbert-base-uncased` checkpoint.
    """

    model_type = "distilbert"
    # Map the generic config attribute names onto DistilBERT's own naming scheme.
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class lowerCamelCase (OnnxConfig ):
    """ONNX export configuration for DistilBERT models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the model inputs and which of their axes are dynamic."""
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra `choice` axis between batch and sequence.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 29 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : List[str] =logging.get_logger(__name__)
def make_batched(videos):
    """Normalize `videos` into batch form: a list of videos, each a list of frames.

    Accepts a single image, a list of frames (one video), or a list of videos,
    and always returns the doubly-nested ``list[list[image]]`` shape.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class UpperCAmelCase ( BaseImageProcessor ):
    r"""
    Video image processor: resizes, center-crops, rescales (with an optional
    offset into a symmetric range) and normalizes every frame of every video
    in a batch, returning a ``pixel_values`` batch feature.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize one frame; `size` is `{"shortest_edge": s}` or `{"height": h, "width": w}`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop one frame to `{"height": h, "width": w}`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values by `scale`, optionally shifting them first when `offset` is set."""
        # Work in float32 so the subtraction below does not truncate integer pixels.
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize one frame with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transformations to a single frame."""
        # Parenthesized so that `resample is None` only matters when resizing is requested.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a batch of videos, falling back to the processor's defaults for
        any argument left as None, and return a `BatchFeature` of pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 70 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCAmelCase__ : str =logging.get_logger(__name__)
class SchedulerType(str, Enum):
    """Names of the supported learning-rate schedules.

    Mixing in `str` lets plain strings be used wherever a member is expected:
    ``SchedulerType("linear") is SchedulerType.LINEAR``.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer, last_epoch=-1):
    """
    Create a schedule with a constant learning rate, using the learning rate set in `optimizer`.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        last_epoch: index of the last epoch when resuming training (defaults to -1).

    Returns:
        A `torch.optim.lr_scheduler.LambdaLR` whose multiplier is always 1.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """
    Constant learning rate preceded by a linear warmup from 0 over `num_warmup_steps`.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: number of steps over which the lr ramps linearly from 0.
        last_epoch: index of the last epoch when resuming training (defaults to -1).

    Returns:
        A `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    """
    Piecewise-constant learning-rate multipliers.

    `step_rules` looks like ``"1:10,20:0.1,0.01"``: multiplier 10 until step 1,
    0.1 until step 20, then 0.01 for the rest of training.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        step_rules: comma-separated ``step:multiplier`` pairs, ending with the
            final multiplier used after the last threshold.
        last_epoch: index of the last epoch when resuming training (defaults to -1).

    Returns:
        A `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    # Every entry except the last is a "step:multiplier" pair.
    for rule_str in rule_list[:-1]:
        value_str, lr_multiple_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_multiple_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            # Return the multiplier of the first threshold the step has not reached.
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Linear warmup from 0 over `num_warmup_steps`, then a linear decay to 0 at
    `num_training_steps`.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps.
        last_epoch: index of the last epoch when resuming training (defaults to -1).

    Returns:
        A `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """
    Linear warmup followed by a cosine decay; with the default `num_cycles=0.5`
    the lr follows half a cosine wave down to 0 at `num_training_steps`.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps.
        num_cycles: number of cosine waves over the decay (0.5 = half wave).
        last_epoch: index of the last epoch when resuming training (defaults to -1).

    Returns:
        A `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    """
    Linear warmup followed by `num_cycles` cosine decays, each restarting hard
    at the maximum learning rate.

    Args:
        optimizer: the wrapped `torch.optim.Optimizer`.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps.
        num_cycles: number of hard restarts over the decay phase.
        last_epoch: index of the last epoch when resuming training (defaults to -1).

    Returns:
        A `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo makes the cosine restart at full amplitude every 1/num_cycles of progress.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def __lowercase(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Create a schedule with linear warmup followed by a polynomial decay from
    the optimizer's initial lr down to `lr_end`.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: number of linear warmup steps.
        num_training_steps: total number of training steps.
        lr_end: final learning rate after the decay.
        power: polynomial power (1.0 = linear decay).
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if `lr_end` is not smaller than the optimizer's initial lr.

    Returns:
        A ``torch.optim.lr_scheduler.LambdaLR`` applying the schedule.
    """
    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


# Canonical public name; the TYPE_TO_SCHEDULER_FUNCTION table below refers to it.
get_polynomial_decay_schedule_with_warmup = __lowercase
# Dispatch table used by the generic scheduler getter below: maps each
# SchedulerType member to the factory function that builds that schedule.
# NOTE(review): the factory names on the right-hand side are not defined under
# these names in the visible portion of this file (the local defs are all
# named `__lowercase`) — confirm they are imported or aliased elsewhere.
lowerCAmelCase__ : dict ={
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowercase(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """Build any supported learning-rate schedule from its SchedulerType name.

    Parameter names were reconstructed from the keyword arguments this function
    itself forwards to the per-schedule factories.

    Args:
        name: a ``SchedulerType`` member or its string value.
        optimizer: the optimizer whose learning rate is scheduled.
        step_rules: rule string, used only by PIECEWISE_CONSTANT.
        num_warmup_steps: warmup length; required by every warmup schedule.
        num_training_steps: total step count; required by decaying schedules.
        num_cycles: hard-restart count, used only by COSINE_WITH_RESTARTS.
        power: polynomial power, used only by POLYNOMIAL.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: when a schedule is missing a required argument.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 118 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder emitted when the `speech` backend is unavailable.

    `DummyObject` (imported above, previously unused because the metaclass
    referenced an undefined name) makes any attribute access raise an
    informative error; instantiating the class does the same via
    `requires_backends`.
    """

    # Backends required by the real implementation.
    UpperCamelCase__ = ['speech']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['speech'])
class UpperCAmelCase_(metaclass=DummyObject):
    """Placeholder emitted when the `speech` backend is unavailable.

    NOTE(review): this class is a byte-for-byte duplicate of the definition
    immediately above and rebinds the same name; kept for fidelity with the
    original module layout.
    """

    # Backends required by the real implementation.
    UpperCamelCase__ = ['speech']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['speech'])
class _lowerCamelCase:
    """Directed graph stored as an adjacency list, with a recursive DFS.

    Limitation (inherited from the original): `dfs` assumes vertices are
    labelled 0..n-1, where n is the number of vertices with outgoing edges.
    """

    def __init__(self) -> None:
        # Maps each vertex to the list of vertices it points to.
        self.vertex = {}

    def print_graph(self) -> None:
        """Print the raw adjacency dict, then one `u -> v -> w` line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex) -> None:
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # First edge out of this vertex: create its adjacency list.
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Run a depth-first traversal from every unvisited vertex, printing the order."""
        visited = [False] * len(self.vertex)
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited) -> None:
        """Visit `start_vertex`, then recurse into its unvisited neighbours."""
        visited[start_vertex] = True
        print(start_vertex, end=' ')
        # Recur for all the vertices that are adjacent to this node.
        for i in self.vertex.get(start_vertex, []):
            if not visited[i]:
                self.dfs_recursive(i, visited)


# Conventional public name for the class above.
Graph = _lowerCamelCase

if __name__ == "__main__":
    graph = Graph()
    graph.add_edge(0, 1)
    graph.add_edge(0, 2)
    graph.add_edge(1, 2)
    graph.add_edge(2, 0)
    graph.add_edge(2, 3)
    graph.add_edge(3, 3)
    graph.print_graph()
    print("DFS:")
    graph.dfs()
    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 21 | """simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
# Paths to the model test files whose structure get_test_info parses.
# (The mangled original assigned both paths to one name, losing the first.)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')


class snake_case ( unittest.TestCase ):
    """Tests for the mapping helpers exposed by utils/get_test_info.py.

    Method names are prefixed with `test_` so unittest discovery actually
    runs them (the mangled originals were named `__lowerCAmelCase`).
    """

    def test_get_test_to_tester_mapping(self):
        """Each *ModelTest class maps to its *ModelTester helper class."""
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {'BertModelTest': 'BertModelTester'}
        expected_blip_mapping = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        """Each model class maps to the list of test classes covering it."""
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        """Each model class maps to the list of tester classes covering it."""
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 98 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCamelCase_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old_key, new_key) -> None:
    """Move ``state_dict[old_key]`` to ``state_dict[new_key]`` in place.

    Renamed from the mangled `lowerCamelCase` to the name the conversion
    routine below actually calls.
    """
    val = state_dict.pop(old_key)
    state_dict[new_key] = val
def rename_backbone_keys(state_dict):
    """Return a copy of `state_dict` (preserving order) with every
    ``backbone.0.body`` prefix renamed to ``backbone.conv_encoder.model``.

    Renamed from the mangled `lowerCamelCase` to the name the conversion
    routine below actually calls.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            # Non-backbone keys are copied through untouched.
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder self-attention ``in_proj`` weight/bias into separate
    q/k/v projection entries, mutating `state_dict` in place.

    The mangled original popped the fused tensors but assigned every slice to
    a throwaway local, so the split entries were never written back; this
    restores the write-backs. Renamed from `lowerCamelCase` to the name the
    conversion routine below actually calls.

    Args:
        state_dict: checkpoint dict with hidden size 256 per attention block.
        is_panoptic: when True, source keys carry a ``conditional_detr.``
            prefix.
    """
    prefix = ''
    if is_panoptic:
        prefix = 'conditional_detr.'
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        # NOTE(review): the destination keys (HF naming, no prefix) follow the
        # upstream DETR conversion scripts — confirm against the model keys.
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
def prepare_img():
    """Download the standard COCO "two cats" validation image used to verify
    the conversion.

    The mangled original passed an undefined name to ``requests.get``; the
    URL local and ``stream=True`` are restored. Renamed from `lowerCamelCase`
    to the name the conversion routine below actually calls.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCamelCase ( a_ , a_ ) -> List[Any]:
    # Convert an original "Conditional DETR" torch-hub checkpoint into a
    # HuggingFace ConditionalDetr{ForObjectDetection,ForSegmentation} model,
    # verify the outputs against the original model, and save model + image
    # processor to `pytorch_dump_folder_path`.
    #
    # NOTE(review): this copy is heavily machine-mangled and cannot run as
    # written:
    #   * the signature declares the name `a_` twice (a SyntaxError); the
    #     intended parameters are presumably (model_name,
    #     pytorch_dump_folder_path) — see the argparse block below.
    #   * every assignment targets the single name `lowerCAmelCase_`, yet the
    #     body reads names such as `config`, `is_panoptic`, `idalabel`,
    #     `image_processor`, `encoding`, `state_dict`, `model`, `outputs` and
    #     `original_outputs` that are never bound.
    #   * the helpers it calls (`rename_key`, `rename_backbone_keys`,
    #     `read_in_q_k_v`, `prepare_img`) are defined above under a mangled
    #     shared name.
    # Restore from the upstream conversion script before use.
    lowerCAmelCase_ = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        lowerCAmelCase_ = 'resnet101'
    if "dc5" in model_name:
        lowerCAmelCase_ = True
    lowerCAmelCase_ = 'panoptic' in model_name
    if is_panoptic:
        lowerCAmelCase_ = 250
    else:
        lowerCAmelCase_ = 91
        lowerCAmelCase_ = 'huggingface/label-files'
        lowerCAmelCase_ = 'coco-detection-id2label.json'
        lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
        lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()}
        lowerCAmelCase_ = idalabel
        lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
    # load image processor
    lowerCAmelCase_ = 'coco_panoptic' if is_panoptic else 'coco_detection'
    lowerCAmelCase_ = ConditionalDetrImageProcessor(format=a_ )
    # prepare image
    lowerCAmelCase_ = prepare_img()
    lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' )
    lowerCAmelCase_ = encoding['pixel_values']
    logger.info(F'''Converting model {model_name}...''' )
    # load original model from torch hub
    lowerCAmelCase_ = torch.hub.load('DeppMeng/ConditionalDETR' , a_ , pretrained=a_ ).eval()
    lowerCAmelCase_ = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            lowerCAmelCase_ = 'conditional_detr.' + src
        rename_key(a_ , a_ , a_ )
    lowerCAmelCase_ = rename_backbone_keys(a_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(a_ , is_panoptic=a_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    lowerCAmelCase_ = 'conditional_detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('conditional_detr' )
                and not key.startswith('class_labels_classifier' )
                and not key.startswith('bbox_predictor' )
            ):
                lowerCAmelCase_ = state_dict.pop(a_ )
                lowerCAmelCase_ = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                lowerCAmelCase_ = state_dict.pop(a_ )
                lowerCAmelCase_ = val
            elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
                continue
            else:
                lowerCAmelCase_ = state_dict.pop(a_ )
                lowerCAmelCase_ = val
        else:
            if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
                lowerCAmelCase_ = state_dict.pop(a_ )
                lowerCAmelCase_ = val
    # finally, create HuggingFace model and load state dict
    lowerCAmelCase_ = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
    model.load_state_dict(a_ )
    model.eval()
    model.push_to_hub(repo_id=a_ , organization='DepuMeng' , commit_message='Add model' )
    # verify our conversion
    lowerCAmelCase_ = conditional_detr(a_ )
    lowerCAmelCase_ = model(a_ )
    assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(a_ ).mkdir(exist_ok=a_ )
    model.save_pretrained(a_ )
    image_processor.save_pretrained(a_ )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): mangled — the parser and the parsed args are both assigned
    # to `lowerCamelCase_` but read back as `parser`/`args`, and
    # `convert_conditional_detr_checkpoint` is not defined under that name in
    # this copy (the conversion function above is named `lowerCamelCase`).
    lowerCamelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        """--model_name""",
        default="""conditional_detr_resnet50""",
        type=str,
        help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    lowerCamelCase_ = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger for the NAT configuration.
# NOTE(review): the logger and the checkpoint->config-URL map below are both
# assigned to the same mangled name `lowerCamelCase_`, so the second
# assignment clobbers the logger — they clearly were distinct names upstream.
lowerCamelCase_ = logging.get_logger(__name__)

# Map of pretrained NAT checkpoint name -> config.json URL.
lowerCamelCase_ = {
    """shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class a_(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration for a NAT (Neighborhood Attention Transformer) model.

    The mangled original inherited from its own (undefined) name and assigned
    every constructor argument to a single local instead of `self`; the bases
    are restored from the imports above and the attributes are stored on the
    instance.

    Args:
        patch_size: size (resolution) of each patch.
        num_channels: number of input image channels.
        embed_dim: dimensionality of the patch embedding.
        depths: number of layers in each encoder stage.
        num_heads: number of attention heads per stage.
        kernel_size: neighborhood-attention kernel size.
        mlp_ratio: ratio of MLP hidden dim to embedding dim.
        qkv_bias: whether queries/keys/values get a learnable bias.
        hidden_dropout_prob: dropout on embeddings and hidden states.
        attention_probs_dropout_prob: dropout on attention probabilities.
        drop_path_rate: stochastic-depth rate.
        hidden_act: activation function name.
        initializer_range: std of the truncated-normal weight init.
        layer_norm_eps: epsilon used by layer-norm layers.
        layer_scale_init_value: initial layer-scale value (0 disables it).
        out_features / out_indices: which stages a backbone consumer exposes.
    """

    model_type = 'nat'

    # Translate the generic config attribute names onto NAT's own names.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        # Mutable list defaults kept deliberately to preserve the public API.
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 14 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class UpperCamelCase(unittest.TestCase):
    """Unit tests for the Vector / Matrix linear-algebra library in `.lib`.

    The mangled original referenced `__UpperCamelCase`, which class-private
    name mangling turns into an undefined global; those references are
    replaced by the obvious local operands. Methods are renamed to `test_*`
    so unittest discovery runs them.
    """

    def test_component(self) -> None:
        """component() returns the entry at the requested index."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # the empty constructor must not raise

    def test_str(self) -> None:
        """str() renders a vector as '(c0,c1,...)'."""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')

    def test_size(self) -> None:
        """len() reports the number of components."""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """euclidean_length() computes the 2-norm."""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """Vector addition is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """Vector subtraction is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """Scalar product scales components; vector * vector is the dot product."""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual(a * b, 0)

    def test_zero_vector(self) -> None:
        """zero_vector(n) creates a vector of n zeros."""
        self.assertEqual(str(zero_vector(10)).count('0'), 10)

    def test_unit_basis_vector(self) -> None:
        """unit_basis_vector(n, i) has a 1 at index i and 0 elsewhere."""
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')

    def test_axpy(self) -> None:
        """axpy(a, x, y) computes a*x + y."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')

    def test_copy(self) -> None:
        """copy() yields an equal, independent vector."""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """change_component() overwrites a single entry in place."""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')

    def test_str_matrix(self) -> None:
        """str() renders a matrix as one '|...|' row per line."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_minor(self) -> None:
        """minor(x, y) is the determinant of the submatrix without row x / col y."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """cofactor(x, y) is the signed minor."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """determinant() of a fixed 3x3 example."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        """Matrix * vector and matrix * scalar products."""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))

    def test_change_component_matrix(self) -> None:
        """change_component() overwrites a single matrix entry in place."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_component_matrix(self) -> None:
        """component(x, y) reads a single matrix entry."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # NOTE(review): the third argument is unittest's `msg`, not a delta;
        # `assertAlmostEqual` was probably intended — kept as found.
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self) -> None:
        """Matrix addition is entry-wise."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))

    def test_sub_matrix(self) -> None:
        """Matrix subtraction is entry-wise."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))

    def test_square_zero_matrix(self) -> None:
        """square_zero_matrix(n) builds an n x n all-zero matrix."""
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n', str(square_zero_matrix(5))
        )


if __name__ == "__main__":
    unittest.main()
| 213 | """simple docstring"""
# Memoization cache shared by all `_calculate` invocations:
# (days_left, absences_so_far, consecutive_lates) -> prize-string count.
__SCREAMING_SNAKE_CASE = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Count valid attendance ("prize") strings of length `days`, given the
    absences used so far and the current run of consecutive late days
    (Project Euler 191)."""
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # Check the memoization cache before recursing.
    key = (days, absent, late)
    if key in __SCREAMING_SNAKE_CASE:
        return __SCREAMING_SNAKE_CASE[key]
    # 1) late today (but not absent): the "late" streak grows by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) absent today: one more absence, the "late" streak resets
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) on time today: the "late" streak resets, absences unchanged
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    __SCREAMING_SNAKE_CASE[key] = prizestrings
    return prizestrings


def lowercase__(days: int = 30) -> int:
    """Project Euler 191: number of `days`-long strings of O/L/A with fewer
    than two absences and no run of three consecutive lates."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(lowercase__())
| 213 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
# Module logger and the docstring/checkpoint constants for the ResNet model.
# NOTE(review): every constant below is assigned to the same mangled name
# `_snake_case`, so each assignment clobbers the previous one; upstream these
# are distinct names (logger, _CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC,
# _EXPECTED_OUTPUT_SHAPE, _IMAGE_CLASS_CHECKPOINT,
# _IMAGE_CLASS_EXPECTED_OUTPUT, pretrained-model list).
_snake_case = logging.get_logger(__name__)
# General docstring
_snake_case = 'ResNetConfig'
# Base docstring
_snake_case = 'microsoft/resnet-50'
_snake_case = [1, 2048, 7, 7]
# Image classification docstring
_snake_case = 'microsoft/resnet-50'
_snake_case = 'tiger cat'
_snake_case = [
    'microsoft/resnet-50',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Basic ResNet building block: convolution -> batch norm -> optional activation.

    Bug fixes vs. the mangled original: the class is renamed to
    ``ResNetConvLayer`` (the name sibling modules instantiate), the forward
    method is named ``forward`` so ``nn.Module.__call__`` dispatches to it,
    and the non-existent ``nn.Convad``/``nn.BatchNormad`` are replaced by
    ``nn.Conv2d``/``nn.BatchNorm2d``.
    """

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ) -> None:
        super().__init__()
        # `padding=kernel_size // 2` keeps the spatial size unchanged for stride 1.
        # Bias is disabled because the following batch norm supplies the shift.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # `activation=None` yields an identity (used before residual additions).
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        """Apply convolution, normalization and activation to `input`."""
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet stem ("embedder"): a 7x7 stride-2 convolution followed by 3x3 stride-2 max pooling.

    Renamed from the mangled ``UpperCamelCase`` to the name ``ResNetModel``
    instantiates; the forward hook is restored to ``forward`` and the
    non-existent ``nn.MaxPoolad`` to ``nn.MaxPool2d``.
    """

    def __init__(self, config: ResNetConfig) -> None:
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Kept so `forward` can validate the channel dimension of incoming images.
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        """Reduce `pixel_values` (batch, channels, height, width) spatially by a factor of 4."""
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet projection shortcut: a 1x1 convolution plus batch norm, used to match the residual
    branch when the channel count or spatial resolution changes.

    Renamed from the mangled ``UpperCamelCase`` to the name the residual layers
    instantiate; ``forward`` restored; ``nn.Conv2d``/``nn.BatchNorm2d`` replace
    the non-existent ``nn.Convad``/``nn.BatchNormad``.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two `3x3` convolutions.

    Renamed from the mangled ``UpperCamelCase`` to the name ``ResNetStage``
    selects; the sub-modules are bound to ``self`` (the original assigned them
    to throwaway locals) and ``forward`` is restored.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu") -> None:
        super().__init__()
        # A projection shortcut is only needed when the residual cannot be added as-is.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        # The second convolution has no activation: it is applied after the residual addition.
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer: channels are reduced by `reduction` with a `1x1`
    convolution, transformed with a `3x3` convolution, then expanded back with a final `1x1`
    convolution.

    Renamed from the mangled ``UpperCamelCase`` to the name ``ResNetStage``
    selects; sub-modules bound to ``self``; ``forward`` restored.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        # The last convolution has no activation: it is applied after the residual addition.
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage: `depth` stacked layers of the type selected by `config.layer_type`
    ("bottleneck" or basic).

    Bug fixes: class renamed to the name ``ResNetEncoder`` instantiates, the
    layer stack is bound to ``self.layers`` (the original bound it to a local,
    so the ``forward`` iteration over ``self.layers`` failed), ``forward``
    restored.
    """

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ) -> None:
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    """Stack of `ResNetStage`s producing the final (and optionally per-stage) hidden states.

    Renamed from the mangled ``UpperCamelCase`` to the name ``ResNetModel``
    instantiates; ``forward`` and its parameter names restored (the original
    read `hidden_state`/`output_hidden_states`/`return_dict` that were never
    bound).
    """

    def __init__(self, config: ResNetConfig) -> None:
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        """Run every stage; optionally collect the input of each stage plus the final output."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading
    and loading pretrained models.

    Restored from the mangled original: the base class is the imported
    ``PreTrainedModel`` (the original referenced the undefined ``snake_case_``)
    and the class attributes use the standard ``PreTrainedModel`` names instead
    of four assignments to the same name ``UpperCamelCase``.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He initialization for convolutions, unit scale / zero shift for norm layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): restored per the `supports_gradient_checkpointing`
        # contract; the mangled original assigned `value` to a throwaway local.
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
# Docstring snippets injected into the model classes below via the
# `add_start_docstrings*` decorators. Bug fix: the mangled original bound both
# strings to the same name (`_snake_case`), losing the first one and leaving
# the decorator references unresolved.
RESNET_START_DOCSTRING = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''',
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    """ResNet embedder + encoder with a global average pool producing a pooled output.

    Restored names: the class itself (``ResNetForImageClassification``
    instantiates ``ResNetModel``), its base class, the ``forward`` method, and
    the decorator arguments, all destroyed by the mangling.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''',
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    """ResNet backbone with a linear classification head.

    Restored names: class, base class, ``forward``, decorator arguments, and
    the ``self.config.problem_type`` assignments that the mangling turned into
    dead local bindings.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from the label dtype and label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    '''
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''',
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    """Exposes intermediate ResNet feature maps for downstream detection/segmentation models.

    Restored names: class, bases (``snake_case_`` was undefined), ``forward``,
    decorator arguments, and the locals the mangling collapsed.
    """

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        """
        Returns:
            The feature maps of the stages requested via `config.out_features`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        # Always request per-stage hidden states: they are needed to pick the feature maps.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 352 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    # sentencepiece is optional: without it there is no slow tokenizer class.
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

# Bug fix: all of the constants below were bound to the same throwaway name
# (`_snake_case`) although the tokenizer class references them by the
# canonical names restored here.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" CamemBERT tokenizer, backed by HuggingFace's *tokenizers* library.

    Restored from the mangled original: the base class is the imported
    ``PreTrainedTokenizerFast`` (``snake_case_`` was undefined) and the five
    class attributes use the standard ``PreTrainedTokenizerFast`` names instead
    of five assignments to the same name ``UpperCamelCase``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `<s> A </s>` (single sequence) or `<s> A </s></s> B </s>` (pair) inputs."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """CamemBERT does not use token type ids: always return a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model file into `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 324 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()

# Module-level logger (the mangled original bound it to `UpperCamelCase__`).
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the list of (timm_key, hf_key) pairs used to rename checkpoint weights.

    Bug fixes: the mangled signature declared two parameters both named `__A`
    (a SyntaxError) while the body reads `config` and `base_model`; the
    function is renamed to the name `convert_vit_checkpoint` calls.

    Args:
        config: ViTHybridConfig whose `backbone_config.depths` and
            `num_hidden_layers` drive the key enumeration.
        base_model: if True, map to a headless model (strip the `vit.` prefix
            and add pooler keys instead of the classifier head).
    """
    rename_keys = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )

    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )

    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )

        rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
        rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
        rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )

    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ] )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    # fmt: on
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate q/k/v entries in `state_dict`.

    Bug fixes: the mangled signature declared three parameters all named `__A`
    (a SyntaxError) while the body reads `state_dict`, `config` and
    `base_model`; the sliced tensors were assigned to throwaway locals instead
    of being written back into `state_dict`. Key names follow the
    `vit.encoder.layer.{i}.attention.*` scheme used by `create_rename_keys`.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification head weights from `state_dict` (in place).

    Bug fix: the mangled body called `state_dict.pop(__lowerCamelCase,
    __lowerCamelCase)` with an undefined name instead of popping each ignored
    key with a `None` default.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new` in `dct` (in place).

    Bug fix: the mangled signature declared three parameters all named `__A`
    (a SyntaxError) and the body popped/assigned undefined names.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats verification image and return it as a PIL image.

    Bug fix: the mangled body passed the undefined `__lowerCamelCase` as the
    `stream` argument and as the URL; `stream=True` is required so `.raw`
    exposes the response body.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a timm hybrid ViT checkpoint into the HuggingFace structure.

    NOTE(review): reconstructed from the mangled original, which declared
    duplicate `__A` parameters (a SyntaxError), bound every local to
    `UpperCAmelCase__` and read the undefined `__lowerCamelCase`. Variable
    bindings below are inferred from the surrounding call sites — confirm
    against the upstream conversion script before shipping.

    Args:
        vit_name: timm model name to convert (e.g. `vit_base_r50_s16_384`).
        pytorch_dump_folder_path: where to save the converted model, or None.
        push_to_hub: whether to upload model and processor to the hub.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""")
        model.push_to_hub(f"""ybelkada/{vit_name}""")
        processor.push_to_hub(f"""ybelkada/{vit_name}""")
if __name__ == "__main__":
    # Bug fix: the mangled original bound the parser and parsed args to
    # `UpperCamelCase__` while the code below reads `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--vit_name',
        default='vit_base_r50_s16_384',
        type=str,
        help='Name of the hybrid ViT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 65 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import machinery: `_import_structure` maps submodule names to their
# public symbols; `_LazyModule` defers the actual imports until first access.
# Bug fix: the mangled original bound the dict and every conditional entry to
# the throwaway name `_snake_case`, so `_import_structure` (referenced by the
# `_LazyModule(...)` call at the bottom) was undefined and the optional
# torch/tf/flax entries were never registered.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

# Static type checkers see the real imports; at runtime the module is replaced
# by a `_LazyModule` that resolves attributes on demand.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 123 | 0 |
"""simple docstring"""
def interpolation_search(sorted_collection, item):
    """Search for `item` in an ascending `sorted_collection` by interpolation.

    Returns the index of a matching element, or None when the item is absent.
    (The original definition had duplicate parameter names and a name that did
    not match its call site — both restored here.)
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # Equal endpoints would make the interpolation divide by zero; the
        # slice is constant, so a direct comparison settles the search.
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        # Linear interpolation between the endpoint values picks the probe.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # A probe outside the collection means the item cannot be present.
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            # Probe undershot the window: shrink it toward the probe.
            right = left
            left = point
        elif point > right:
            # Probe overshot the window: shrink it toward the probe.
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive interpolation search over sorted_collection[left:right + 1].

    Returns the index of `item` or None. (The original definition had four
    identical parameter names — a SyntaxError — and its recursive calls lost
    their arguments; both restored here.)
    """
    # Equal endpoint values would divide by zero in the interpolation below.
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # Out-of-range probe: the item cannot be present.
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        # Probe landed before the window: recurse on the narrowed span.
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        # Probe landed after the window: recurse on the narrowed span.
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError unless `collection` is sorted ascending; return True.

    (Renamed from an obfuscated placeholder to match the call in the
    __main__ guard below.)
    """
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    # Demo: search a sample sorted collection for a target value.
    # debug = 1 additionally validates that the data really is sorted.
    # (The original bound all of these names to throwaway variables and
    # then read `debug`/`collection`/`target`/`result`, raising NameError.)
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print('Not found')
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__snake_case : int = logging.get_logger(__name__)
class A__ ( SequenceFeatureExtractor ):
    """Whisper-style log-mel feature extractor.

    Pads/truncates raw audio to `chunk_length` seconds and converts it to
    log-mel spectrogram `input_features`. The original class had an undefined
    base-class name and duplicate parameter names (SyntaxErrors), and every
    `self.` assignment was discarded; all restored here from the attribute
    and method names the code itself reads (`self.n_fft`, `self.hop_length`,
    `self._np_extract_fbank_features`, `self.zero_mean_unit_var_norm`, ...).
    """

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        # Fixed-size window the model expects: chunk_length seconds of audio.
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the clamped, rescaled log-mel spectrogram of one waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        # Clamp the dynamic range to 8 below the max, then map roughly to [-1, 1].
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Normalize each sequence to zero mean / unit variance over its valid length."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    # Padded tail keeps the configured padding value.
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize raw audio into padded log-mel `input_features`."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the extractor config, dropping the large derived filter bank."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, via 6k +/- 1 trial division.

    Raises AssertionError for negative or non-int input. (Renamed from an
    obfuscated placeholder so the TestCase below can actually call it.)
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for the `is_prime` helper.

    Methods are named `test_*` so unittest discovery runs them (the original
    gave both methods the same non-test name, so the first was shadowed and
    neither ever executed).
    """

    def test_primes(self) -> None:
        # Representative primes must all be accepted.
        for prime in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29):
            with self.subTest(prime=prime):
                self.assertTrue(is_prime(prime))

    def test_not_primes(self) -> None:
        # Negative input violates the precondition assert inside is_prime.
        # (The original passed this TestCase class itself to assertRaises.)
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
    # Discover and run the TestCase defined above via unittest's CLI runner.
    unittest.main()
| 32 |
"""Assemble an UnCLIP image-variation pipeline from a pretrained
text-to-image UnCLIP checkpoint plus a CLIP image encoder, and save it.

(The original script bound every value to the same throwaway variable and
then read `parser`/`args`/`txt2img`/`img2img`, so it crashed immediately;
it also misspelled the argparse attribute `txt2img_unclip`.)
"""

import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    # Reuse the text-to-image checkpoint's sub-models; only the image-encoder
    # side (feature extractor + CLIP vision tower) is new.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 104 | 0 |
"""simple docstring"""
def __lowercase(_a):
    """Return True if ``_a`` is an automorphic number: its square ends with
    the number itself (e.g. 5 -> 25, 76 -> 5776).

    Raises TypeError for non-int input; negative numbers return False.
    """
    number = _a  # public parameter name kept; readable alias used internally
    # Guard: isinstance needs a *type* as second argument — the original
    # passed the value itself, which always raised. Also raise with the
    # message, not the raw value.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # Compare trailing digits of the number and its square, one at a time.
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 155 |
"""simple docstring"""
def __lowercase(_a):
    """Return True if ``_a`` is even (its least-significant bit is 0).

    The original body read an undefined name `number`; it now uses the
    actual parameter. Bitwise `&` binds tighter than `==` in Python, so the
    expression is (_a & 1) == 0.
    """
    return _a & 1 == 0
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 155 | 1 |
'''simple docstring'''
def valid_coloring(neighbours, colored_vertices, color):
    """Return True if `color` may be assigned given already-colored neighbours.

    `neighbours` is one adjacency-matrix row (1 marks an edge). The original
    definition used three identical parameter names — a SyntaxError.
    """
    # Reject the color if any adjacent vertex already uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    """Backtracking helper: try to color graph[index:]; True on success.

    Mutates `colored_vertices` in place (uncolored slots are -1).
    """
    # Base Case: every vertex received a color.
    if index == len(graph):
        return True

    # Recursive Step: try each of the max_colors colors on this vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    """Return a valid vertex coloring using at most `max_colors` colors.

    Returns the per-vertex color list, or [] when no coloring exists.
    """
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
return []
| 2 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds small ViT configs/inputs and checks TF ViT outputs for the suite.

    Renamed from an obfuscated placeholder to match its instantiation in the
    model-test class; the original also discarded every constructor argument
    instead of storing it on `self`, and gave all methods one shared name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for TF ViT.

    Renamed from an obfuscated placeholder so it no longer collides with the
    other classes in this file; method names restored to `test_*`/`setUp`
    (the original gave every method the same name, so only the last survived
    and unittest discovered nothing).
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the integration test below.

    (Renamed from an obfuscated placeholder to match its call site.)
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the pretrained TF ViT classifier on a real image.

    Renamed from an obfuscated placeholder; the original also discarded every
    intermediate value instead of binding `model`/`inputs`/`outputs` etc.
    """

    @cached_property
    def default_image_processor(self):
        # Only materialize the processor when vision deps are installed.
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 244 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
A : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    """Convert PIL image(s)/tensor(s) to a [-1, 1] NCHW float tensor.

    Legacy helper (deprecated in favor of VaeImageProcessor.preprocess).
    Renamed from an obfuscated placeholder to match its call site in the
    pipeline below; the original also discarded the `w, h` bindings.
    """
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        # Map [0, 1] pixel values into the [-1, 1] range the UNet expects.
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    """Convert PIL mask(s)/tensor(s) to a binary NCHW float tensor.

    Renamed from an obfuscated placeholder to match its call site; the
    original also discarded the `mask`/`w`/`h` bindings and the 0.5
    binarization writes.
    """
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # Binarize the grayscale mask around 0.5.
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class A ( DiffusionPipeline ):
    """RePaint inpainting pipeline: denoises while periodically re-noising
    ("undo" steps) so the generated region harmonizes with the known region.

    The base class and all local bindings were destroyed by obfuscation
    (duplicate constructor parameters were a SyntaxError); restored here.
    """

    # Components registered via register_modules in __init__.
    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run RePaint inpainting on `image` with `mask_image` (1 = keep)."""
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t (re-noise for harmonization)
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        # Map latents from [-1, 1] back to [0, 1] pixel space.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 360 |
import pytest

# Name and source of a dummy dataset loading script; the fixtures below read
# these constants (the original bound both to a throwaway variable `A`).
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    """Fixture: module name of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    """Fixture: source code of the dummy dataset loading script."""
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    """Fixture: write the dummy loading script under tmp_path/datasets/<name>/
    and return that directory as a string.

    (The original fixture had three identical parameter names — a
    SyntaxError — and read the upstream fixtures by names that were never
    bound; both restored here.)
    """
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 146 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    """Builds tiny VideoMAE configs/inputs and runs shape checks for the tests below.

    NOTE(review): the mangled source named this class ``lowerCAmelCase_`` while
    the test class instantiates ``VideoMAEModelTester``; gave ``__init__``
    duplicate parameter names (a SyntaxError); and dropped the ``self.`` targets
    of the attribute assignments. All are restored from the in-file usages; a
    backwards-compat alias for the mangled name is kept after the class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)``; labels is None unless use_labels."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a small VideoMAEConfig from the tester's hyper-parameters."""
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model returns a (batch, seq_len, hidden) last_hidden_state."""
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        """Check the pretraining head predicts pixels only for the masked patches."""
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for ModelTesterMixin: return ``(config, inputs_dict)``."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Backwards-compat alias for the mangled class name.
lowerCAmelCase_ = VideoMAEModelTester
@require_torch
class lowerCAmelCase_(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline model tests for VideoMAE.

    NOTE(review): the mangled source used the undefined name ``__magic_name__``
    for both mixin bases (ModelTesterMixin and PipelineTesterMixin are imported
    at the top of the file), collapsed every method name to ``_snake_case`` and
    every class attribute to ``__lowerCamelCase`` (later definitions shadowed
    earlier ones, so unittest discovered nothing). Canonical names restored.
    """

    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    # VideoMAE does not support these common test features.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Copy ``inputs_dict``, adding ``bool_masked_pos`` for pretraining and labels on request."""
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                # for pretraining, attention maps cover only the visible (unmasked) patches
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            # embeddings output + one per transformer layer
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    """Download the spaghetti-eating test video (stored as a .npy) and return it as a list of frames.

    NOTE(review): the mangled source named this ``__a`` while the integration
    tests below call ``prepare_video()``; the canonical name is restored with a
    backwards-compat alias.
    """
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


# Backwards-compat alias for the mangled function name.
__a = prepare_video
@require_torch
@require_vision
class lowerCAmelCase_(unittest.TestCase):
    """Slow integration tests for VideoMAE against published checkpoints.

    NOTE(review): the mangled source named every method ``_snake_case`` (later
    defs shadowed earlier ones, and ``self.default_image_processor`` could not
    resolve) and used ``_lowerCAmelCase`` before assignment where ``torch_device``
    was intended. Canonical names restored.
    """

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        # fixed: the original wrapped the value in a redundant nested torch.tensor() call
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
# --- dataset-join artifact removed (was: "| 158 |") ---
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the MBart model family: framework-specific modules
# are only imported when their attributes are actually accessed.
# NOTE(review): the mangled source rebound a single name (`_SCREAMING_SNAKE_CASE`)
# in every branch and never defined the `_import_structure` consumed by
# `_LazyModule`; the canonical `_import_structure[...]` item assignments and the
# `sys.modules[__name__]` registration are restored.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- dataset-join artifact removed (was: "| 158 | 1 |") ---
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module-level logger, following the transformers convention.
logger = logging.get_logger(__name__)

# Canonical T5 checkpoints mapped to their hosted configuration files.
# NOTE(review): the mangled source bound both the logger and this map to the
# same name (`snake_case_`), clobbering the logger; canonical names restored.
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class __snake_case(PretrainedConfig):
    """Configuration class for T5 models.

    NOTE(review): the mangled source declared the base class as the undefined
    name ``a`` (``PretrainedConfig`` is imported at the top of the file) and
    gave every ``__init__`` parameter the same duplicate name -- a SyntaxError.
    The canonical parameter names are restored from the defaults and the
    attribute assignments in the body.
    """

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-gelu" style strings split into (gating flag, activation name)
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class __snake_case(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for T5 (seq2seq with optional past key values).

    NOTE(review): the mangled source declared the base class as the undefined
    name ``a`` (``OnnxSeqaSeqConfigWithPast`` is imported at the top of the
    file) and collapsed the ``common_inputs[...]`` item assignments into
    rebinds of a single local name; both are restored.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes specification for the exported model's inputs."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # with a cache, the decoder receives one new token and attends over the past
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        # minimum opset required by this export configuration
        return 13
# --- dataset-join artifact removed (was: "| 357 |") ---
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
# Module-level logger (the mangled binding name `snake_case_` carried no meaning).
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __snake_case(Pipeline):
    """Visual Question Answering pipeline: answer a free-text question about an image.

    NOTE(review): the mangled source used the undefined name ``a`` for both the
    decorator argument and the base class (``PIPELINE_INIT_ARGS``/``Pipeline``
    are imported above), collapsed every hook name to ``lowerCamelCase`` (so the
    Pipeline base class could not dispatch to them), and lost the dict-item
    assignments in ``_sanitize_parameters``/``preprocess``. All restored.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Split call-time kwargs into per-stage (preprocess, forward, postprocess) dicts."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """Answer ``question`` about ``image``; also accepts pre-built input dict(s)."""
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # assumed to already be {"image": ..., "question": ...} or an iterable of such dicts
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            # multi-label style scoring: sigmoid over the single example's logits
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        # NOTE(review): the mangled source read `config.idalabel`; the actual
        # PretrainedConfig attribute is `id2label`.
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
# --- dataset-join artifact removed (was: "| 7 | 0 |") ---
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __snake_case(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline.

    NOTE(review): the mangled source collapsed the tuple-unpack targets and all
    local names, and named every method ``UpperCAmelCase__`` (so ``tearDown``
    was lost and only the last method survived). Canonical names restored.
    """

    def tearDown(self):
        # free host memory between the slow tests
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
# --- dataset-join artifact removed (was: "| 348 |") ---
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __snake_case:
    """Builds tiny Blenderbot configs/inputs for the TF model tests below.

    NOTE(review): the mangled source gave ``__init__`` duplicate parameter
    names (a SyntaxError), dropped the ``self.`` targets of the attribute
    assignments, and collapsed the class attributes/method names. Restored:
    ``config_cls`` and ``config_updates`` are read by the methods below.
    """

    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` with an EOS appended to every encoder input row."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify cached decoding matches full re-decoding on a random logits slice."""
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        # the cache check runs on a single example, so narrow the batch size
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def lowerCAmelCase_(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard dict of TF Blenderbot model inputs, synthesizing any
    masks that were not supplied.

    BUG FIX: the obfuscated original declared eight parameters all named
    ``__lowerCAmelCase`` (a SyntaxError — duplicate argument names), discarded
    each computed mask into a throwaway local, and used the nonexistent dtype
    ``tf.inta``.  Parameter names and ``tf.int8`` are restored per the upstream
    Blenderbot test helper.
    """
    if attention_mask is None:
        # Attend everywhere except pad positions in the encoder input.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token (the start token may equal
        # the pad id); mask pads for the remaining positions.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common-test harness for TF Blenderbot.

    NOTE(review): obfuscation renamed every class attribute to
    ``__lowerCamelCase`` (name-mangled inside the class body), so the later
    assignments clobber the earlier ones; upstream these are ``all_model_classes``,
    ``all_generative_model_classes``, ``pipeline_model_mapping``,
    ``is_encoder_decoder``, ``fcompatible`` flags, etc.
    """
    # Model classes exercised by the shared model tests (empty without TF installed).
    __lowerCamelCase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    __lowerCamelCase : Dict = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    __lowerCamelCase : Dict = (
        {
            """conversational""": TFBlenderbotForConditionalGeneration,
            """feature-extraction""": TFBlenderbotModel,
            """summarization""": TFBlenderbotForConditionalGeneration,
            """text2text-generation""": TFBlenderbotForConditionalGeneration,
            """translation""": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __lowerCamelCase : Union[str, Any] = True
    __lowerCamelCase : Union[str, Any] = False
    __lowerCamelCase : Union[str, Any] = False
    def UpperCAmelCase__ ( self ) -> Dict:
        """Create the model tester and config tester fixtures.

        NOTE(review): ``snake_case__`` is unbound here (not a parameter) —
        upstream passes ``config_class=BlenderbotConfig``.
        """
        UpperCAmelCase : List[str] =TFBlenderbotModelTester(self )
        UpperCAmelCase : List[Any] =ConfigTester(self , config_class=snake_case__ )
    def UpperCAmelCase__ ( self ) -> Optional[Any]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def UpperCAmelCase__ ( self ) -> List[Any]:
        """Exercise the cached-decoding equivalence check on fresh inputs."""
        UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class __snake_case ( unittest.TestCase ):
    """Slow integration test: generate a reply with the 400M distilled
    Blenderbot checkpoint and compare against a golden string.

    NOTE(review): the two class attributes below are both name-mangled to
    ``_`` + class + ``__lowerCamelCase`` (the second clobbers the first);
    upstream they are ``src_text`` and ``model_name``, which the test body
    still references via ``self.src_text`` / ``self.model_name``.
    """
    __lowerCamelCase : List[str] = ["""My friends are cool but they eat too many carbs."""]
    __lowerCamelCase : Dict = """facebook/blenderbot-400M-distill"""
    @cached_property
    def UpperCAmelCase__ ( self ) -> int:
        """Lazily load the Blenderbot tokenizer (upstream name: ``tokenizer``)."""
        return BlenderbotTokenizer.from_pretrained(self.model_name )
    @cached_property
    def UpperCAmelCase__ ( self ) -> int:
        """Lazily load the TF seq2seq model (upstream name: ``model``).

        NOTE(review): the return references ``model`` but the load result was
        bound to a throwaway local by the obfuscation.
        """
        UpperCAmelCase : int =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def UpperCAmelCase__ ( self ) -> Any:
        """Tokenize the source text, generate, decode, and check the reply."""
        UpperCAmelCase : Optional[int] =self.tokenizer(self.src_text , return_tensors='''tf''' )
        UpperCAmelCase : Optional[int] =self.model.generate(
            model_inputs.input_ids , )
        # skip_special_tokens=True upstream; decode only the first sequence.
        UpperCAmelCase : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 348 | 1 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Usage string shown when the script is invoked with the wrong argument count.
usage_doc = """Usage of script: script_name <size_of_canvas:int>"""
# BUG FIX: the obfuscated original bound the list to a throwaway name and then
# called random.shuffle(choice) on the unbound name `choice` (NameError at
# import). A ~9% alive-cell pool, shuffled once at import time.
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def UpperCamelCase ( __lowercase : int ):
    """Return a square ``__lowercase`` x ``__lowercase`` grid of dead (False) cells.

    Each row is an independent list, so mutating one cell never affects
    another row.
    """
    grid: list[list[bool]] = [[False] * __lowercase for _ in range(__lowercase )]
    return grid
def UpperCamelCase ( __lowercase : list[list[bool]] ):
    """Seed the canvas in place: set every cell to a uniformly random boolean.

    BUG FIX: the obfuscated original assigned ``bool(random.getrandbits(1))``
    to a throwaway local instead of writing ``canvas[i][j]``, so the canvas was
    never modified.  It also enumerated the whole canvas in the inner loop
    (only correct for square grids); we enumerate the row itself.
    """
    for i, row in enumerate(__lowercase ):
        for j, _ in enumerate(row ):
            __lowercase[i][j] = bool(random.getrandbits(1 ) )
def UpperCamelCase ( __lowercase : list[list[bool]] ):
    """Advance the Game of Life board one generation and return the new board
    as a plain list of lists.

    BUG FIX: the obfuscated original discarded every intermediate into the
    same throwaway local (``A_``) while the later lines referenced the unbound
    names ``current_canvas`` / ``next_gen_canvas``, and its inner loop
    enumerated the whole canvas instead of the row.  Restored per the
    upstream script.

    NOTE(review): ``create_canvas`` and ``__judge_point`` must be bound at
    module scope; in this obfuscated file the sibling definitions were all
    renamed to ``UpperCamelCase``, so those names still need restoring too.
    """
    current_canvas = np.array(__lowercase )
    # Fresh all-dead board of the same (square) size for the next generation.
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            # NOTE(review): for r == 0 or c == 0 the slice start is -1, which
            # wraps in numpy and yields an empty window, so border cells see
            # fewer neighbours — behavior inherited from the original script.
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def UpperCamelCase ( pt : bool , neighbours : list[list[bool]] ):
    """Apply Conway's Game of Life rules to a single cell.

    BUG FIX: the obfuscated original declared both parameters as
    ``__lowercase`` — a SyntaxError (duplicate argument names).  Restored
    the names implied by the call site: ``pt`` is the focus cell's current
    state, ``neighbours`` the 3x3 window that *includes* the focus cell.
    The unused ``dead`` tally was dropped.

    Returns the cell's next state.
    """
    alive = 0
    for row in neighbours:
        for status in row:
            if status:
                alive += 1
    # The window includes the focus cell itself; remove it from the tally.
    if pt:
        alive -= 1
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state
# Entry point: animate Conway's Game of Life on an NxN canvas given on argv.
# NOTE(review): obfuscation bound every result to `_UpperCAmelCase`, so the
# names actually read below (`usage_doc`, `canvas_size`, `c`, `fig`, `ax`,
# `cmap`) and the callables (`create_canvas`, `seed`, `run`) are unbound in
# this file as written — restore the upstream bindings before running.
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    # canvas_size upstream
    _UpperCAmelCase = int(sys.argv[1])
    # main working structure of this module.
    _UpperCAmelCase = create_canvas(canvas_size)
    seed(c)
    # fig, ax upstream
    _UpperCAmelCase ,_UpperCAmelCase = plt.subplots()
    fig.show()
    # Two-color map: white for dead cells, black for alive cells.
    _UpperCAmelCase = ListedColormap(["""w""", """k"""])
    try:
        # Redraw one generation per iteration until the user interrupts.
        while True:
            _UpperCAmelCase = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 192 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class UpperCAmelCase ( __A ):
    """Configuration class for the LiLT (Language-independent Layout
    Transformer) model.

    BUG FIX: the obfuscated original declared every ``__init__`` parameter as
    ``lowercase`` (a SyntaxError — duplicate argument names) and assigned each
    value to a throwaway local instead of ``self``.  Parameter names are
    restored from the defaults' positions; attribute names follow this file's
    obfuscated spelling (e.g. ``max_ad_position_embeddings`` for the 2-D
    position embedding size) so sibling code keeps working.
    """

    lowerCamelCase_ = '''lilt'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_ad_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 192 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
# Map fairseq parameter-name fragments to their HF Wav2Vec2Conformer
# counterparts.  BUG FIX: the obfuscated original bound both this dict and the
# key list below to the same throwaway name, while the loading code reads
# `MAPPING` and `TOP_LEVEL_KEYS` — restoring the names the code expects.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Targets that live at the top level of the HF model (no "wav2vec2_conformer."
# prefix is prepended for these).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def UpperCAmelCase_(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the HF module attribute addressed by dotted ``key``.

    BUG FIX: the obfuscated original declared five parameters all named
    ``__snake_case`` (SyntaxError) and discarded every assignment into a
    throwaway local, so no weight was ever written.  Restored per the upstream
    converter: walk the dotted path, validate shapes, then write the tensor
    into the attribute selected by ``weight_type`` (or the pointer itself).

    Raises ValueError when the target shape does not match ``value``.
    """
    # Walk the dotted attribute path down to the target submodule/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def UpperCAmelCase_(fairseq_model, hf_model, is_headless):
    """Port every fairseq weight into the HF Wav2Vec2Conformer model.

    BUG FIX: the obfuscated original declared three parameters all named
    ``__snake_case`` (SyntaxError) and discarded each intermediate into a
    throwaway local while later lines read the unbound names ``fairseq_dict``,
    ``feature_extractor``, ``is_used``, ``mapped_key`` and ``weight_type``.
    Restored per the upstream converter.

    NOTE(review): ``is_headless`` is accepted but unused in this chunk's body,
    matching the source; ``MAPPING``, ``TOP_LEVEL_KEYS``, ``set_recursively``,
    ``load_conv_layer`` and ``logger`` must exist at module scope.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            # Feature-extractor conv weights get a dedicated loader.
            load_conv_layer(
                name, value, feature_extractor, unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index for the wildcard.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def UpperCAmelCase_(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq feature-extractor conv/layer-norm tensor into the HF
    feature extractor, or record it as unused.

    BUG FIX: the obfuscated original declared five parameters all named
    ``__snake_case`` (SyntaxError) and discarded every assignment into a
    throwaway local, so nothing was loaded.  Restored per the upstream
    converter.

    Raises ValueError on a shape mismatch.  ``logger`` must exist at module
    scope.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        # type 0 = the convolution itself.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2 = the per-layer norm (or the group norm on layer 0).
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=True ) -> str:
"""simple docstring"""
if config_path is not None:
_lowercase =WavaVecaConformerConfig.from_pretrained(__snake_case , hidden_act='''swish''' )
else:
_lowercase =WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowercase ='''rotary'''
if is_finetuned:
if dict_path:
_lowercase =Dictionary.load(__snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowercase =target_dict.pad_index
_lowercase =target_dict.bos_index
_lowercase =target_dict.eos_index
_lowercase =len(target_dict.symbols )
_lowercase =os.path.join(__snake_case , '''vocab.json''' )
if not os.path.isdir(__snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__snake_case ) )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
_lowercase =target_dict.indices
# fairseq has the <pad> and <s> switched
_lowercase =0
_lowercase =1
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__snake_case , __snake_case )
_lowercase =WavaVecaCTCTokenizer(
__snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__snake_case , )
_lowercase =True if config.feat_extract_norm == '''layer''' else False
_lowercase =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
_lowercase =WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
processor.save_pretrained(__snake_case )
_lowercase =WavaVecaConformerForCTC(__snake_case )
else:
_lowercase =WavaVecaConformerForPreTraining(__snake_case )
if is_finetuned:
_lowercase , _lowercase , _lowercase =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_lowercase =argparse.Namespace(task='''audio_pretraining''' )
_lowercase =fairseq.tasks.setup_task(__snake_case )
_lowercase , _lowercase , _lowercase =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__snake_case )
_lowercase =model[0].eval()
recursively_load_weights(__snake_case , __snake_case , not is_finetuned )
hf_wavavec.save_pretrained(__snake_case )
# CLI entry point for the wav2vec2-conformer checkpoint conversion.
# BUG FIX: the obfuscated original bound the parser and parsed args to a
# throwaway name while the following lines read the unbound names `parser`
# and `args`.
# NOTE(review): `convert_wavaveca_conformer_checkpoint` must be bound at
# module scope (the sibling definition in this file was renamed by the
# obfuscation and needs restoring as well).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 5 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : int = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Derive the DPT-hybrid model configuration from a checkpoint URL.

    Returns ``(config, expected_shape)``.

    BUG FIXES vs. the original:
    * ``if "nyu" or "midas" in checkpoint_url:`` was always true, because the
      non-empty literal ``"nyu"`` is truthy regardless of the URL — each
      substring is now tested against the URL explicitly.
    * the obfuscation left ``checkpoint_url`` unbound (the parameter was
      renamed) and discarded every config assignment into a throwaway local.

    NOTE(review): the DPTConfig attribute names below follow the upstream
    conversion script; confirm ``reassemble_factors`` against the installed
    transformers version.
    """
    checkpoint_url = __lowerCamelCase
    config = DPTConfig(embedding_type="hybrid" )
    # Default so the return never hits an unbound local for unrecognized URLs
    # (the old always-true branch used to guarantee a binding).
    expected_shape = (1, 384, 384)
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def lowerCAmelCase_ ( __lowerCamelCase ):
    """Remove checkpoint keys that should not be ported to the HF model,
    mutating the state dict in place.

    BUG FIX: the obfuscated original bound the ignore list to a throwaway
    local and then called ``state_dict.pop(...)`` on unbound names; restored
    to ``state_dict.pop(k, None)`` so missing keys are silently skipped.
    """
    state_dict = __lowerCamelCase
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def lowerCAmelCase_ ( __lowerCamelCase ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__snake_case : Tuple = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
__snake_case : Tuple = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
__snake_case : Optional[Any] = name.replace("patch_embed" , "" )
if "pos_embed" in name:
__snake_case : Optional[int] = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
__snake_case : List[str] = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
__snake_case : Union[str, Any] = name.replace("proj" , "projection" )
if "blocks" in name:
__snake_case : int = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
__snake_case : Tuple = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__snake_case : Any = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
__snake_case : Optional[Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
__snake_case : Any = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
__snake_case : Dict = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
__snake_case : Union[str, Any] = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
__snake_case : List[Any] = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
__snake_case : str = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
__snake_case : List[str] = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
__snake_case : Optional[int] = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
__snake_case : Optional[int] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__snake_case : int = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__snake_case : Any = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
__snake_case : List[Any] = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
__snake_case : Tuple = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
__snake_case : List[str] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
__snake_case : str = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__snake_case : Union[str, Any] = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
__snake_case : Optional[int] = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
__snake_case : List[str] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
__snake_case : Dict = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__snake_case : Tuple = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
__snake_case : int = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
__snake_case : Union[str, Any] = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
__snake_case : Optional[Any] = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
__snake_case : Optional[int] = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
__snake_case : Dict = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
__snake_case : Union[str, Any] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
__snake_case : Union[str, Any] = name.replace("pretrained" , "dpt" )
if "bn" in name:
__snake_case : Tuple = name.replace("bn" , "batch_norm" )
if "head" in name:
__snake_case : Dict = name.replace("head" , "head.head" )
if "encoder.norm" in name:
__snake_case : Optional[int] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
__snake_case : Tuple = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
__snake_case : str = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
__snake_case : Tuple = name.replace(".." , "." )
if "stem.conv" in name:
__snake_case : int = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
__snake_case : Any = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
__snake_case : Optional[int] = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
__snake_case : List[Any] = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
__snake_case : Optional[int] = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
__snake_case : int = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
__snake_case : Optional[Any] = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def lowerCAmelCase_(state_dict, config):
    """Split each layer's fused timm-style qkv projection into separate HF
    query/key/value entries, mutating ``state_dict`` in place.

    BUG FIX: the obfuscated original declared both parameters as
    ``__lowerCamelCase`` (SyntaxError — duplicate argument names) and discarded
    every slice into a throwaway local instead of writing the new state-dict
    keys; restored per the upstream DPT conversion script.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( ):
    """Download and return the standard COCO cats test image used to sanity
    check the converted model.

    BUG FIX: the obfuscated original called
    ``requests.get(__lowerCamelCase, stream=__lowerCamelCase)`` — an unbound
    name (NameError) and the wrong ``stream`` value; restored to the URL local
    with ``stream=True`` so the raw byte stream can be handed to PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case , __snake_case : Optional[int] = get_dpt_config(__lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
__snake_case : Optional[int] = torch.load(__lowerCamelCase , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
__snake_case : Optional[int] = state_dict.pop(__lowerCamelCase )
__snake_case : Optional[Any] = val
# read in qkv matrices
read_in_q_k_v(__lowerCamelCase , __lowerCamelCase )
# load HuggingFace model
__snake_case : Dict = DPTForSemanticSegmentation(__lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# Check outputs on an image
__snake_case : str = 4_8_0 if "ade" in checkpoint_url else 3_8_4
__snake_case : Any = DPTImageProcessor(size=__lowerCamelCase )
__snake_case : int = prepare_img()
__snake_case : Union[str, Any] = image_processor(__lowerCamelCase , return_tensors="pt" )
# forward pass
__snake_case : Dict = model(**__lowerCamelCase ).logits if "ade" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth
if show_prediction:
__snake_case : int = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=__lowerCamelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
# CLI entry point for the DPT checkpoint conversion.
# BUG FIX: the obfuscated original bound the parser and parsed args to a
# throwaway name while the following lines read the unbound names `parser`
# and `args`.
# NOTE(review): `convert_dpt_checkpoint` must be bound at module scope (the
# sibling definition in this file was renamed by the obfuscation).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
| 123 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__SCREAMING_SNAKE_CASE : List[Any] = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__SCREAMING_SNAKE_CASE : Optional[Any] = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
def UpperCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=4 , lowercase_=False ):
_snake_case : Optional[Any] = compute_bleu(
reference_corpus=lowercase_ , translation_corpus=lowercase_ , max_order=lowercase_ , smooth=lowercase_ )
((_snake_case) ,(_snake_case) ,(_snake_case) ,(_snake_case) ,(_snake_case) ,(_snake_case)) : int = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
} | 284 | import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__SCREAMING_SNAKE_CASE : Any = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__SCREAMING_SNAKE_CASE : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def snake_case (__lowercase ) -> str:
'''simple docstring'''
if "://" in dataset_path:
_snake_case : Tuple = dataset_path.split("://" )[1]
return dataset_path
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def snake_case (__lowercase , __lowercase , __lowercase ) -> str:
'''simple docstring'''
_snake_case : Optional[int] = not is_remote_filesystem(__lowercase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__lowercase ) , fs._strip_protocol(__lowercase ) )
else:
fs.mv(__lowercase , __lowercase , recursive=__lowercase )
def snake_case () -> None:
'''simple docstring'''
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_snake_case : List[str] = None
_snake_case : str = None
_snake_case : List[Any] = threading.Lock() | 284 | 1 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _lowercase (a_ ):
'''simple docstring'''
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
super().__init__()
UpperCamelCase_ = value_function
UpperCamelCase_ = unet
UpperCamelCase_ = scheduler
UpperCamelCase_ = env
UpperCamelCase_ = env.get_dataset()
UpperCamelCase_ = {}
for key in self.data.keys():
try:
UpperCamelCase_ = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase_ = {}
for key in self.data.keys():
try:
UpperCamelCase_ = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase_ = env.observation_space.shape[0]
UpperCamelCase_ = env.action_space.shape[0]
def _lowerCamelCase ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def _lowerCamelCase ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def _lowerCamelCase ( self , snake_case__ ):
'''simple docstring'''
if type(snake_case__ ) is dict:
return {k: self.to_torch(snake_case__ ) for k, v in x_in.items()}
elif torch.is_tensor(snake_case__ ):
return x_in.to(self.unet.device )
return torch.tensor(snake_case__ , device=self.unet.device )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
for key, val in cond.items():
UpperCamelCase_ = val.clone()
return x_in
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = x.shape[0]
UpperCamelCase_ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase_ = torch.full((batch_size,) , snake_case__ , device=self.unet.device , dtype=torch.long )
for _ in range(snake_case__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase_ = self.value_function(x.permute(0 , 2 , 1 ) , snake_case__ ).sample
UpperCamelCase_ = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase_ = self.scheduler._get_variance(snake_case__ )
UpperCamelCase_ = torch.exp(0.5 * posterior_variance )
UpperCamelCase_ = model_std * grad
UpperCamelCase_ = 0
UpperCamelCase_ = x.detach()
UpperCamelCase_ = x + scale * grad
UpperCamelCase_ = self.reset_xa(snake_case__ , snake_case__ , self.action_dim )
UpperCamelCase_ = self.unet(x.permute(0 , 2 , 1 ) , snake_case__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase_ = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , predict_epsilon=snake_case__ )["prev_sample"]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase_ = self.reset_xa(snake_case__ , snake_case__ , self.action_dim )
UpperCamelCase_ = self.to_torch(snake_case__ )
return x, y
def __call__( self , snake_case__ , snake_case__=64 , snake_case__=32 , snake_case__=2 , snake_case__=0.1 ):
'''simple docstring'''
UpperCamelCase_ = self.normalize(snake_case__ , "observations" )
UpperCamelCase_ = obs[None].repeat(snake_case__ , axis=0 )
UpperCamelCase_ = {0: self.to_torch(snake_case__ )}
UpperCamelCase_ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase_ = randn_tensor(snake_case__ , device=self.unet.device )
UpperCamelCase_ = self.reset_xa(snake_case__ , snake_case__ , self.action_dim )
UpperCamelCase_ = self.to_torch(snake_case__ )
# run the diffusion process
UpperCamelCase_ , UpperCamelCase_ = self.run_diffusion(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# sort output trajectories by value
UpperCamelCase_ = y.argsort(0 , descending=snake_case__ ).squeeze()
UpperCamelCase_ = x[sorted_idx]
UpperCamelCase_ = sorted_values[:, :, : self.action_dim]
UpperCamelCase_ = actions.detach().cpu().numpy()
UpperCamelCase_ = self.de_normalize(snake_case__ , key="actions" )
# select the action with the highest value
if y is not None:
UpperCamelCase_ = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase_ = np.random.randint(0 , snake_case__ )
UpperCamelCase_ = denorm_actions[selected_index, 0]
return denorm_actions
| 128 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _lowercase :
'''simple docstring'''
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=50 , snake_case__=0.02 , snake_case__=True , snake_case__=None , ):
'''simple docstring'''
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = initializer_range
UpperCamelCase_ = use_labels
UpperCamelCase_ = scope
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self ):
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self ):
'''simple docstring'''
(
(
UpperCamelCase_
) , (
UpperCamelCase_
) , (
UpperCamelCase_
) , (
UpperCamelCase_
) ,
) = self.prepare_config_and_inputs()
UpperCamelCase_ = True
UpperCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationEncoder(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(snake_case__ , attention_mask=snake_case__ )
UpperCamelCase_ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = True
UpperCamelCase_ = BertGenerationEncoder(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = BertGenerationDecoder(config=snake_case__ ).to(snake_case__ ).eval()
# first forward pass
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
UpperCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0]
UpperCamelCase_ = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationDecoder(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase (a_ , a_ , a_ , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase__ = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationEncoderTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def _lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = "bert"
self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
(
(
UpperCamelCase_
) , (
UpperCamelCase_
) , (
UpperCamelCase_
) , (
UpperCamelCase_
) , (
UpperCamelCase_
) , (
UpperCamelCase_
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase_ = None
self.model_tester.create_and_check_model_as_decoder(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*snake_case__ )
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(snake_case__ )
@require_torch
class _lowercase (unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
UpperCamelCase_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
UpperCamelCase_ = model(snake_case__ )[0]
UpperCamelCase_ = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , snake_case__ )
UpperCamelCase_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
@require_torch
class _lowercase (unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
UpperCamelCase_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
UpperCamelCase_ = model(snake_case__ )[0]
UpperCamelCase_ = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape , snake_case__ )
UpperCamelCase_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 128 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowercase__ :Tuple = logging.get_logger(__name__)
lowercase__ :List[Any] = {"vocab_file": "spiece.model"}
lowercase__ :Optional[int] = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
lowercase__ :Optional[int] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
lowercase__ :int = 0
lowercase__ :Optional[Any] = 1
lowercase__ :List[Any] = 2
lowercase__ :str = 3
lowercase__ :Optional[int] = 4
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[str] =VOCAB_FILES_NAMES
lowercase_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] ='''left'''
def __init__( self ,A__ ,A__=False ,A__=True ,A__=False ,A__="<s>" ,A__="</s>" ,A__="<unk>" ,A__="<sep>" ,A__="<pad>" ,A__="<cls>" ,A__="<mask>" ,A__=["<eop>", "<eod>"] ,A__ = None ,**A__ ,):
# Mask token behave like a normal word, i.e. include the space before it
lowercase = AddedToken(A__ ,lstrip=A__ ,rstrip=A__) if isinstance(A__ ,A__) else mask_token
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A__ ,remove_space=A__ ,keep_accents=A__ ,bos_token=A__ ,eos_token=A__ ,unk_token=A__ ,sep_token=A__ ,pad_token=A__ ,cls_token=A__ ,mask_token=A__ ,additional_special_tokens=A__ ,sp_model_kwargs=self.sp_model_kwargs ,**A__ ,)
lowercase = 3
lowercase = do_lower_case
lowercase = remove_space
lowercase = keep_accents
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(A__)
@property
def A__ ( self):
return len(self.sp_model)
def A__ ( self):
lowercase = {self.convert_ids_to_tokens(A__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self ,A__):
lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs'''):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def A__ ( self ,A__):
if self.remove_space:
lowercase = ''' '''.join(inputs.strip().split())
else:
lowercase = inputs
lowercase = outputs.replace('''``''' ,'''"''').replace('''\'\'''' ,'''"''')
if not self.keep_accents:
lowercase = unicodedata.normalize('''NFKD''' ,A__)
lowercase = ''''''.join([c for c in outputs if not unicodedata.combining(A__)])
if self.do_lower_case:
lowercase = outputs.lower()
return outputs
def A__ ( self ,A__):
lowercase = self.preprocess_text(A__)
lowercase = self.sp_model.encode(A__ ,out_type=A__)
lowercase = []
for piece in pieces:
if len(A__) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(A__ ,''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
lowercase = cur_pieces[1:]
else:
lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(A__)
else:
new_pieces.append(A__)
return new_pieces
def A__ ( self ,A__):
return self.sp_model.PieceToId(A__)
def A__ ( self ,A__):
return self.sp_model.IdToPiece(A__)
def A__ ( self ,A__):
lowercase = ''''''.join(A__).replace(A__ ,''' ''').strip()
return out_string
def A__ ( self ,A__ ,A__ = False ,A__ = None ,A__ = True ,**A__ ,):
lowercase = kwargs.pop('''use_source_tokenizer''' ,A__)
lowercase = self.convert_ids_to_tokens(A__ ,skip_special_tokens=A__)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowercase = []
lowercase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A__))
lowercase = []
sub_texts.append(A__)
else:
current_sub_text.append(A__)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A__))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowercase = ''''''.join(A__)
lowercase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowercase = self.clean_up_tokenization(A__)
return clean_text
else:
return text
def A__ ( self ,A__ ,A__ = None):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def A__ ( self ,A__ ,A__ = None ,A__ = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ ,token_ids_a=A__ ,already_has_special_tokens=A__)
if token_ids_a is not None:
return ([0] * len(A__)) + [1] + ([0] * len(A__)) + [1, 1]
return ([0] * len(A__)) + [1, 1]
def A__ ( self ,A__ ,A__ = None):
lowercase = [self.sep_token_id]
lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def A__ ( self ,A__ ,A__ = None):
if not os.path.isdir(A__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
lowercase = os.path.join(
A__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(A__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,A__)
elif not os.path.isfile(self.vocab_file):
with open(A__ ,'''wb''') as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(A__)
return (out_vocab_file,)
| 97 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ :Optional[int] = logging.get_logger(__name__)
lowercase__ :List[Any] = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[Any] ='''camembert'''
def __init__( self ,A__=3_0_5_2_2 ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=2 ,A__=0.02 ,A__=1E-12 ,A__=1 ,A__=0 ,A__=2 ,A__="absolute" ,A__=True ,A__=None ,**A__ ,):
super().__init__(pad_token_id=A__ ,bos_token_id=A__ ,eos_token_id=A__ ,**A__)
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = use_cache
lowercase = classifier_dropout
class lowercase ( SCREAMING_SNAKE_CASE__ ):
@property
def A__ ( self):
if self.task == "multiple-choice":
lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 97 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A : str = logging.get_logger(__name__)
# TODO: upload to AWS
A : Dict = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''retribert'''
def __init__( self : Optional[int] , __magic_name__ : Optional[Any]=30_522 , __magic_name__ : int=768 , __magic_name__ : Dict=8 , __magic_name__ : List[Any]=12 , __magic_name__ : Tuple=3_072 , __magic_name__ : List[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : Any=0.1 , __magic_name__ : Tuple=512 , __magic_name__ : Dict=2 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=1e-12 , __magic_name__ : List[str]=True , __magic_name__ : Dict=128 , __magic_name__ : Union[str, Any]=0 , **__magic_name__ : List[Any] , ) -> Dict:
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = share_encoders
SCREAMING_SNAKE_CASE_ = projection_dim
| 118 | import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = CpmAntTokenizer
lowerCamelCase__ = False
def __A ( self : List[str] ) -> str:
super().setUp()
SCREAMING_SNAKE_CASE_ = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def __A ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
SCREAMING_SNAKE_CASE_ = "今天天气真好!"
SCREAMING_SNAKE_CASE_ = ["今天", "天气", "真", "好", "!"]
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = "今天天气真好!"
SCREAMING_SNAKE_CASE_ = [tokenizer.bos_token] + tokens
SCREAMING_SNAKE_CASE_ = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
| 118 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration module.
UpperCamelCase_ = logging.get_logger(__name__)

# Map from canonical checkpoint name to the URL of its hosted config file.
# NOTE(review): this rebinds the same name as the logger above (an artifact of
# identifier mangling) — presumably intended as a separate archive-map constant;
# verify before relying on either binding.
UpperCamelCase_ = {
    'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    """Configuration holding the architecture hyperparameters of a FocalNet model.

    NOTE(review): the previous ``__init__`` declared every parameter with the same
    name (``__UpperCAmelCase``), which is a ``SyntaxError`` (duplicate argument in
    function definition), and bound the incoming values to throwaway locals
    (``a_``) instead of instance attributes — so ``self.depths`` and
    ``self.stage_names``, which the tail of the constructor reads, were never set.
    The parameters have been renamed to match the attributes they populate (the
    mapping is recoverable 1:1 from the assignment order) and the assignments now
    target ``self``.
    """

    a_ : Optional[Any] = """focalnet"""

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Initialize the configuration; unrecognized kwargs go to the base config."""
        super().__init__(**kwargs)
        # Input / patch-embedding geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        # Per-stage widths and depths.
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        # Activation / MLP / regularization settings.
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One stage name per depth entry, preceded by the patch-embedding "stem".
        self.stage_names = ["stem"] + [F'''stage{idx}''' for idx in range(1, len(self.depths) + 1)]
        # Resolve which feature maps a backbone built from this config exposes.
        self.out_features, self.out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name) -> None:
    """Generate a README.md model card for one allenai WMT16 FSMT checkpoint.

    Args:
        model_card_dir: ``pathlib.Path`` directory the ``README.md`` is written
            into; created (with parents) if it does not exist.
        src_lang: source language code (e.g. ``"en"``).
        tgt_lang: target language code (e.g. ``"de"``).
        model_name: checkpoint name; must be a key of the BLEU score table below.
    """
    # Example sentences rendered into the usage snippet of the card.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

# Write one card per ported allenai WMT16 checkpoint (all are en->de).
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return an ``OrderedDict`` copy of ``state_dict`` with the timm backbone
    keys (``backbone.0.body``) renamed to the HF ``backbone.conv_encoder.model``
    prefix; all other keys are kept as-is."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split the fused self-attention ``in_proj`` weight/bias of each of the 6
    encoder layers into separate q/k/v projection entries (hidden size 256),
    removing the fused entries from ``state_dict`` in place."""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download and return the standard COCO "cats" image used to sanity-check
    the conversion (network access required)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Convert an original Conditional DETR checkpoint (torch hub) to the
    HuggingFace format, verify outputs match, push to the hub, and save the
    model + image processor to ``pytorch_dump_folder_path``.

    Args:
        model_name: torch-hub model id, e.g. ``"conditional_detr_resnet50"``;
            ``"dc5"`` / ``"resnet101"`` / ``"panoptic"`` substrings select config.
        pytorch_dump_folder_path: output directory (created if missing).
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
            id2label = json.load(f)
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    image_format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=image_format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                # NOTE(review): remap targets reconstructed from the DETR-family
                # conversion scripts — confirm against the attribute layout of
                # ConditionalDetrForSegmentation before relying on panoptic output.
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 14 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    """Helper that holds image-processor kwargs for the tests and computes the
    expected post-resize (height, width) of input images."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by default, use the processor's default resize configuration
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after shortest-edge resizing.

        For a batch, each image's expected size is computed individually and the
        per-dimension maxima are returned (images get padded up to these)."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                width, height = image.size
            else:
                height, width = image.shape[1], image.shape[2]
            if width < height:
                expected_height = int(self.size["shortest_edge"] * height / width)
                expected_width = self.size["shortest_edge"]
            elif width > height:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * width / height)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `DeformableDetrImageProcessor` (properties, call paths, COCO annotations)."""

    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 14 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Import guard: the Shap-E pipelines need both `transformers` and `torch`.
# When either is missing, expose the dummy `ShapEPipeline` instead so the name
# still resolves and raises a helpful error at use time.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    # Real implementations, importable only when the optional deps are present.
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
| 198 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character.

    Characters are taken alternately from ``first_str`` and ``second_str``;
    once the shorter string is exhausted, the remainder of the longer one is
    appended as-is.

    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # iterate up to the length of the longer string
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): all three module constants are bound to the same name
# `_lowerCamelCase`, so the first two bindings are immediately overwritten.
# These were presumably three distinct constants (the two sentencepiece vocab
# fixture paths and a framework flag) — confirm the intended names against the
# tokenizer test class below, which appears to consume them.
_lowerCamelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
_lowerCamelCase : Tuple = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_lowerCamelCase : Dict = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = CamembertTokenizer
UpperCamelCase = CamembertTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : str = CamembertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : int ):
UpperCAmelCase : List[Any] = '''<pad>'''
UpperCAmelCase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ), lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ), lowerCAmelCase__ )
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1], '''<pad>''' )
self.assertEqual(vocab_keys[-1], '''<mask>''' )
self.assertEqual(len(lowerCAmelCase__ ), 1_0_0_4 )
def __magic_name__ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_5 )
def __magic_name__ ( self : str ):
UpperCAmelCase : Dict = CamembertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
UpperCAmelCase : str = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCAmelCase : Dict = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase : Union[str, Any] = tokenizer.encode(lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : List[Any] = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__ )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
def __magic_name__ ( self : Optional[int] ):
# Slow/fast parity on tokenize() and encode(), skipped when the fast
# tokenizer is disabled for this test class.
if not self.test_rust_tokenizer:
return
UpperCAmelCase : Optional[Any] = self.get_tokenizer()
UpperCAmelCase : Optional[int] = self.get_rust_tokenizer()
UpperCAmelCase : List[str] = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase : List[Any] = tokenizer.tokenize(lowerCAmelCase__ )
UpperCAmelCase : List[Any] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : List[str] = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__ )
UpperCAmelCase : Any = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
UpperCAmelCase : int = self.get_rust_tokenizer()
UpperCAmelCase : Tuple = tokenizer.encode(lowerCAmelCase__ )
UpperCAmelCase : int = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
@slow
def __magic_name__ ( self : Optional[int] ):
# Integration check against a pinned `camembert-base` revision: the two
# French sentences below must encode exactly to the hard-coded ids.
UpperCAmelCase : Union[str, Any] = {'''input_ids''': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
UpperCAmelCase : str = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='''camembert-base''', revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''', sequences=lowerCAmelCase__, )
| 336 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Example-script directories, added to sys.path so each `run_*.py` example
# can be imported as a plain module by the tests below.
# NOTE(review): the mangled source assigned this list to `lowercase__` but
# the following statements referenced the undefined name `SRC_DIRS`, and the
# `List[str]` / `Dict` annotations were undefined names evaluated at import
# time (typing is never imported here).  Fixed by defining `SRC_DIRS` and
# keeping `lowercase__` as a backward-compatible alias.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
lowercase__ = SRC_DIRS
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
# Root logger; the handler wiring further down logs through `logger`.
logger = logging.getLogger()
lowercase__ = logger
def a__():
    """Return the value of the ``-f`` command-line flag.

    pytest forwards its own ``-f <file>`` argument when running these tests;
    parsing just that flag keeps argparse from choking on it.

    Returns:
        The string passed after ``-f`` (``None`` when the flag is absent).
    """
    # NOTE(review): the original `-> Optional[int]` annotation referenced the
    # undefined name `Optional` (typing is not imported) and the value is a
    # string anyway, so the annotation was dropped.
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def a__(lowercase, split="eval") -> dict:
    """Load the ``<split>_results.json`` an example script wrote.

    Args:
        lowercase: output directory the example script wrote results into.
        split: which results split to read (``"eval"``, ``"test"``, ...);
            callers in this file pass it by keyword.

    Returns:
        The parsed JSON metrics dict.

    Raises:
        ValueError: if the results file does not exist.
    """
    # NOTE(review): the mangled source declared BOTH parameters as
    # `lowercase` (a SyntaxError) and referenced the undefined names `split`
    # and `path`; restored from the callers (`split='test'` keyword) and the
    # f-strings in the body.
    path = os.path.join(lowercase, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
# Mirror log records to stdout so pytest captures the example scripts' output.
# NOTE(review): the mangled source assigned the handler to `lowercase__` and
# then called `logger.addHandler(stream_handler)` on two undefined names;
# fixed by naming the handler and attaching it to the root logger (the same
# object the module configures above).
stream_handler = logging.StreamHandler(sys.stdout)
lowercase__ = stream_handler  # preserve the original module-level alias
logging.getLogger().addHandler(stream_handler)
class __lowerCAmelCase ( TestCasePlus ):
    """End-to-end smoke tests for the Flax example scripts.

    Each test builds a CLI argument list for one ``run_*.py`` example,
    patches ``sys.argv``, calls the script's ``main()``, then asserts on the
    metrics the script wrote to ``{split}_results.json`` (read back through
    the module-level ``a__`` results helper).

    NOTE(review): the mangled source inherited from the undefined name
    ``__magic_name__`` (restored to the imported ``TestCasePlus``, whose
    ``get_auto_remove_tmp_dir`` every test calls), reused one local for both
    the tmp dir and the argv list (breaking the ``{tmp_dir}`` / ``{epochs}``
    f-string references), patched ``argv`` on an undefined object (restored
    to ``sys``), and named every method identically so they shadowed each
    other (restored to descriptive ``test_*`` names so pytest discovers
    them).
    """

    def test_run_glue(self):
        # Text classification on the MRPC fixture.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = a__(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        # Causal LM fine-tuning on the tiny text fixture.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = a__(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        # Seq2seq summarization; checks ROUGE on the xsum fixture's test split.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = a__(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        # Masked LM fine-tuning.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = a__(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        # T5-style span-corruption pretraining objective.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(sys, "argv", testargs):
            run_ta_mlm_flax.main()
            result = a__(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # Token classification; with multiple GPUs the effective batch grows,
        # so train longer to reach the same quality bar.
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = a__(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        # Extractive question answering on the SQuAD-v2 fixture.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = a__(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 324 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import PIL only when the vision extra is installed; otherwise install a
# no-op stub so module-level references don't explode (the vision tests are
# gated by @require_vision anyway).
# NOTE(review): the stub class is named `__lowerCamelCase`, not `Image`, and
# `Tuple` in the annotation is never imported here -- mangling artifacts;
# confirm against the upstream pipeline test before relying on it.
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def lowerCAmelCase_ ( *lowerCamelCase , **lowerCamelCase ) -> Tuple:
pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
# Pipeline tests for "zero-shot-object-detection": a tiny random OWL-ViT
# checkpoint for fast checks plus slow integration tests against the real
# default checkpoint with hard-coded box/score expectations.
# NOTE(review): several signatures below declare duplicate parameter names
# (`lowerCamelCase` repeated -> SyntaxError) and all methods share the name
# `lowerCAmelCase_`, so they shadow each other -- mangling damage; the
# originals were get_test_pipeline / run_pipeline_test / test_* methods.
lowerCamelCase_ : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
# Builds the pipeline under test plus one example input.
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
snake_case_ = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
snake_case_ = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
# Shape check: every detection carries score/label/box with int corners.
def lowerCAmelCase_ ( self , lowerCamelCase , lowerCamelCase ) -> List[Any]:
snake_case_ = object_detector(examples[0] , threshold=0.0 )
snake_case_ = len(lowerCamelCase )
self.assertGreater(lowerCamelCase , 0 )
self.assertEqual(
lowerCamelCase , [
{
"""score""": ANY(lowerCamelCase ),
"""label""": ANY(lowerCamelCase ),
"""box""": {"""xmin""": ANY(lowerCamelCase ), """ymin""": ANY(lowerCamelCase ), """xmax""": ANY(lowerCamelCase ), """ymax""": ANY(lowerCamelCase )},
}
for i in range(lowerCamelCase )
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def lowerCAmelCase_ ( self ) -> str:
pass
# Fast check with the tiny random checkpoint: exact scores/boxes pinned.
@require_torch
def lowerCAmelCase_ ( self ) -> int:
snake_case_ = pipeline(
"""zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
snake_case_ = object_detector(
"""./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
# Same input wrapped in a batch of one must produce the same detections,
# nested one level deeper.
snake_case_ = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
# Slow: default checkpoint on a real COCO image, single and batched input.
@require_torch
@slow
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case_ = pipeline("""zero-shot-object-detection""" )
snake_case_ = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
snake_case_ = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
def lowerCAmelCase_ ( self ) -> Any:
pass
# Slow: raising the threshold must drop the low-confidence detections.
@require_torch
@slow
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case_ = 0.2
snake_case_ = pipeline("""zero-shot-object-detection""" )
snake_case_ = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowerCamelCase , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
# Slow: top_k must cap the number of returned detections.
@require_torch
@slow
def lowerCAmelCase_ ( self ) -> Any:
snake_case_ = 2
snake_case_ = pipeline("""zero-shot-object-detection""" )
snake_case_ = object_detector(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowerCamelCase , )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
| 353 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): both assignments below target the same mangled name
# `lowerCamelCase_`, so the logger is immediately shadowed by the checkpoint
# map -- originally these were `logger` and the pretrained-config archive map.
lowerCamelCase_ = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config URL.
lowerCamelCase_ = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowerCamelCase ( __snake_case ):
    """LeViT model configuration (e.g. `facebook/levit-128S`).

    Stores the convolutional patch-embedding geometry (kernel/stride/padding),
    per-stage hidden sizes, attention-head counts, depths and key dimensions,
    plus the fixed inter-stage "Subsample" down-sampling operations.

    NOTE(review): the mangled source declared every ``__init__`` parameter as
    ``lowerCamelCase`` (duplicate parameter names -> SyntaxError) and used the
    undefined name ``Tuple`` in annotations.  Parameter names were restored
    from the attribute assignments in the body; ``model_type`` follows the
    PretrainedConfig convention.
    """

    model_type = 'levit'

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Inter-stage "Subsample" attention ops; the layout mirrors the
        # original LeViT implementation (heads derived from hidden // key_dim).
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class __lowerCamelCase ( __snake_case ):
    """ONNX export configuration for LeViT.

    NOTE(review): the mangled source gave BOTH properties the same name
    (the second shadowed the first) and had extraction junk fused onto the
    final return statement (``return 1e-4 | 34 | 0 |`` -- a SyntaxError).
    Member names were restored per the transformers ``OnnxConfig`` API.
    """

    # Minimum torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Axis layout of the single ``pixel_values`` input."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the ONNX export against PyTorch."""
        return 1e-4
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
# Module logger.
# NOTE(review): both assignments target the same mangled name `_snake_case`,
# so the logger is shadowed by the string -- originally `logger` and
# (presumably) the `_CONFIG_FOR_DOC` docstring placeholder; confirm upstream.
_snake_case : Optional[Any] = logging.get_logger(__name__)
_snake_case : Union[str, Any] = "T5Config"
class a (TFTaModel):
    """TF mT5 base model: the T5 architecture driven by an MTaConfig.

    NOTE(review): the mangled source inherited from the undefined name
    ``lowerCAmelCase__`` and assigned both class attributes to the same
    identifier (the first assignment was dead).  The base class was restored
    from this module's imports and the attribute names from the
    PreTrainedModel convention (``model_type`` / ``config_class``).
    """

    model_type = """mt5"""
    config_class = MTaConfig
class a (TFTaForConditionalGeneration):
    """TF mT5 model with an LM head for conditional generation.

    NOTE(review): base class restored from this module's imports (the mangled
    source inherited from the undefined ``lowerCAmelCase__``) and the two
    class attributes renamed from one shared identifier to the
    PreTrainedModel convention (``model_type`` / ``config_class``).
    """

    model_type = """mt5"""
    config_class = MTaConfig
class a (TFTaEncoderModel):
    """TF mT5 encoder-only model.

    NOTE(review): base class restored from this module's imports (the mangled
    source inherited from the undefined ``lowerCAmelCase__``) and the two
    class attributes renamed from one shared identifier to the
    PreTrainedModel convention (``model_type`` / ``config_class``).
    """

    model_type = """mt5"""
    config_class = MTaConfig
| 123 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
# Pipeline tests for "text2text-generation" with a tiny random T5 model in
# both PT and TF; greedy decoding (do_sample=False) keeps outputs stable.
# NOTE(review): the mangled signatures below repeat the parameter name
# `__a` (duplicate parameter names -> SyntaxError) and every method is
# named `snake_case`, so later defs shadow earlier ones -- originally
# get_test_pipeline / run_pipeline_test / test_small_model_pt / _tf.
__UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
# Builds the pipeline under test plus example prompts.
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
return generator, ["Something to write", "Something else"]
# Shape checks: single prompt, batched prompts, num_return_sequences,
# and a TypeError for non-string input.
def snake_case ( self , __a , __a ):
__lowerCAmelCase = generator("Something there" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
__lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
__lowerCAmelCase = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
with self.assertRaises(__a ):
generator(4 )
# PyTorch: pinned greedy/beam outputs for the tiny random checkpoint,
# plus return_tensors and custom pad-token batching.
@require_torch
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
__lowerCAmelCase = 3
__lowerCAmelCase = generator(
"Something there" , num_return_sequences=__a , num_beams=__a , )
__lowerCAmelCase = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(__a , __a )
__lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
] , )
__lowerCAmelCase = generator.model.config.eos_token_id
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = generator(
["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
] , )
# TensorFlow: greedy output for the same tiny checkpoint.
@require_tf
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
| 57 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
# Fast ConsistencyModelPipeline tests on tiny test UNets: multistep and
# one-step sampling, conditional and unconditional, with pinned slices.
# NOTE(review): `SCREAMING_SNAKE_CASE` as base class / parameter names is
# mangling (originally PipelineTesterMixin and descriptive names); the
# signature at the get_dummy_inputs method repeats a parameter name, which
# is a SyntaxError as written.
_UpperCAmelCase : List[str] = ConsistencyModelPipeline
_UpperCAmelCase : Dict = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_UpperCAmelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
_UpperCAmelCase : Dict = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
# Tiny unconditional test UNet.
@property
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Any = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' ,subfolder='test_unet' ,)
return unet
# Tiny class-conditional test UNet.
@property
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[Any] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' ,subfolder='test_unet_class_cond' ,)
return unet
# Assembles the pipeline components with the CM multistep scheduler.
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int]=False):
if class_cond:
__lowerCamelCase : Optional[int] = self.dummy_cond_unet
else:
__lowerCamelCase : Optional[int] = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCamelCase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
}
return components
# Seeded call kwargs; explicit timesteps [22, 0] select two-step sampling.
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : str=0):
if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
__lowerCamelCase : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__)
else:
__lowerCamelCase : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [2_2, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
# Multistep, unconditional: 32x32 output with a pinned corner slice.
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : str = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCamelCase : Any = image[0, -3:, -3:, -1]
__lowerCamelCase : Union[str, Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
# Multistep, class-conditional (label 0).
def lowerCAmelCase ( self : str):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[Any] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = 0
__lowerCamelCase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
__lowerCamelCase : Dict = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
# One-step (num_inference_steps=1, timesteps=None), unconditional.
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = 1
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Any = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCamelCase : str = image[0, -3:, -3:, -1]
__lowerCamelCase : Union[str, Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
# One-step, class-conditional (label 0).
def lowerCAmelCase ( self : int):
__lowerCamelCase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[str] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = 1
__lowerCamelCase : int = None
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : Any = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : List[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Union[str, Any]):
# Release GPU memory between slow tests.
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
# Seeded call kwargs for the full consistency-model checkpoint, optionally
# replacing the generator with pre-computed fixed latents.
# NOTE(review): the signature repeats the parameter name
# `SCREAMING_SNAKE_CASE__` (duplicate names -> SyntaxError as written);
# originally (seed, get_fixed_latents, device, dtype, shape).
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int=0 ,SCREAMING_SNAKE_CASE__ : List[str]=False ,SCREAMING_SNAKE_CASE__ : Any="cpu" ,SCREAMING_SNAKE_CASE__ : str=torch.floataa ,SCREAMING_SNAKE_CASE__ : Dict=(1, 3, 6_4, 6_4)):
__lowerCamelCase : Any = torch.manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = {
'num_inference_steps': None,
'timesteps': [2_2, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
__lowerCamelCase : List[str] = self.get_fixed_latents(seed=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ ,shape=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = latents
return inputs
# Deterministic latents for a given seed/device/dtype/shape.
# NOTE(review): duplicate `SCREAMING_SNAKE_CASE__` parameter names here too
# (SyntaxError as written); originally (seed, device, dtype, shape).
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0 ,SCREAMING_SNAKE_CASE__ : List[Any]="cpu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=torch.floataa ,SCREAMING_SNAKE_CASE__ : Optional[int]=(1, 3, 6_4, 6_4)):
if type(SCREAMING_SNAKE_CASE__) == str:
__lowerCamelCase : str = torch.device(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = randn_tensor(SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__)
return latents
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Tuple = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2')
__lowerCamelCase : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : Tuple = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
pipe.to(torch_device=SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = self.get_inputs()
__lowerCamelCase : List[Any] = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[int] = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2')
__lowerCamelCase : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : Any = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
pipe.to(torch_device=SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.get_inputs()
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : List[Any] = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
@require_torch_a
def lowerCAmelCase ( self : str):
__lowerCamelCase : List[str] = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2')
__lowerCamelCase : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : Dict = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ,torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ ,enable_math=SCREAMING_SNAKE_CASE__ ,enable_mem_efficient=SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Optional[int] = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@require_torch_a
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' ,subfolder='diffusers_cd_imagenet64_l2')
__lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 ,sigma_min=0.002 ,sigma_max=80.0 ,)
__lowerCamelCase : List[str] = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ,torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = 1
__lowerCamelCase : Dict = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ ,enable_math=SCREAMING_SNAKE_CASE__ ,enable_mem_efficient=SCREAMING_SNAKE_CASE__):
__lowerCamelCase : str = pipe(**SCREAMING_SNAKE_CASE__).images
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 113 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
    """Estimate pi with a Monte Carlo dart simulation.

    Draws ``lowerCamelCase__`` uniform points in the square [-1, 1] x [-1, 1]
    and uses the fraction landing inside the unit circle (area ratio pi / 4)
    to estimate pi.  Prints the estimate, the reference value of pi and the
    absolute error; returns ``None``.

    Fixes: the inner helper originally declared two parameters with the same
    placeholder name (a SyntaxError) and read undefined names ``x``/``y``.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance greater than 1 would
        # land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle.
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(lowerCamelCase__)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , function_to_integrate , min_value = 0.0 , max_value = 1.0 , ) -> float:
    """Monte Carlo estimate of the integral of ``function_to_integrate``
    over ``[min_value, max_value]`` using ``lowerCamelCase__`` samples.

    Averages the function at uniformly drawn points and scales by the
    interval width.  Fixes: the original declared all four parameters with
    the same placeholder name, which is a SyntaxError; the restored names
    match the references in the body.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(lowerCamelCase__)
    ) * (max_value - min_value)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , min_value = 0.0 , max_value = 1.0 ) -> None:
    """Sanity-check the Monte Carlo integrator against y = x.

    Estimates the area under y = x on ``[min_value, max_value]`` with
    ``lowerCamelCase__`` samples, compares it with the closed form
    (max^2 - min^2) / 2 and prints both plus the error; returns ``None``.

    Fixes: duplicate placeholder parameter names (a SyntaxError) and a call
    to the undefined name ``area_under_curve_estimator`` — the estimate is
    now inlined.
    """

    def identity_function(x: float) -> float:
        return x

    # Inlined Monte Carlo area estimate (the shared estimator helper is not
    # in scope under its original name in this module).
    estimated_value = mean(
        identity_function(uniform(min_value, max_value)) for _ in range(lowerCamelCase__)
    ) * (max_value - min_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print('******************')
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
    """Estimate pi as the area under sqrt(4 - x^2) on [0, 2].

    That curve is a quarter circle of radius 2, whose area is exactly pi.
    Prints the Monte Carlo estimate (``lowerCamelCase__`` samples), the
    reference value and the error; returns ``None``.

    Fixes: the inner helper's placeholder parameter shadowed nothing while
    the body read an undefined ``x``, and the original called the undefined
    name ``area_under_curve_estimator`` — the estimate is now inlined.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    # Inlined Monte Carlo area estimate over [0.0, 2.0].
    estimated_value = mean(
        function_to_integrate(uniform(0.0, 2.0)) for _ in range(lowerCamelCase__)
    ) * (2.0 - 0.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print('******************')
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 113 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both statements below bind the same placeholder name ``a``,
# so the archive map overwrites the logger — the original distinct names
# need to be restored.
a = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config file.
a = {
    'facebook/s2t-wav2vec2-large-en-de': (
        'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SCREAMING_SNAKE_CASE__ ( _a ):
    """Configuration class for a Speech2Text2 decoder.

    Holds the architecture hyper-parameters of the decoder (vocabulary size,
    hidden size, layer/head counts, dropout rates, ...) and forwards the
    special-token ids to the base configuration class.

    Fixes: the original ``__init__`` declared every parameter with the same
    placeholder name (a SyntaxError) and bound plain locals instead of
    ``self`` attributes; the three class attributes all bound ``_a`` so only
    the last survived.  Names restored to match the references in the body.
    """

    # Restored conventional config class attributes (originally all bound
    # to the placeholder ``_a``).
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self , vocab_size=1_0000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        # Architecture sizes.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        # Regularisation and initialisation.
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 155 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _a ):
    """TVLT-style audio feature extractor: converts raw waveforms into
    log-mel spectrogram patches, pads a batch to a common patch grid and
    returns a matching attention mask.

    NOTE(review): identifiers were mechanically rewritten to placeholders —
    ``__init__`` declares every parameter as ``lowerCAmelCase`` (duplicate
    parameter names are a SyntaxError) and method bodies bind
    ``lowerCAmelCase`` while reading the intended names; the real names must
    be restored before this class can run.
    """

    _a = ['audio_values', 'audio_mask']

    def __init__( self : Optional[int] , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Optional[Any]=[16, 16] , lowerCAmelCase : Optional[Any]=128 , lowerCAmelCase : Union[str, Any]=4_4100 , lowerCAmelCase : Any=86 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : List[str]=0.0 , **lowerCAmelCase : Any , ):
        super().__init__(
            feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
        # Spectrogram geometry and STFT parameters.
        lowerCAmelCase = spectrogram_length
        lowerCAmelCase = num_channels
        lowerCAmelCase = patch_size
        lowerCAmelCase = feature_size // self.patch_size[1]
        lowerCAmelCase = n_fft
        lowerCAmelCase = sampling_rate // hop_length_to_sampling_rate
        lowerCAmelCase = sampling_rate
        lowerCAmelCase = padding_value
        # Slaney-scale mel filter bank applied in the fbank extraction below.
        lowerCAmelCase = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , ).T

    def __lowercase ( self : int , lowerCAmelCase : np.array ):
        # Compute a dB-scaled log-mel spectrogram and rescale it to
        # approximately [-1, 1].
        lowerCAmelCase = spectrogram(
            lowerCAmelCase , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        lowerCAmelCase = log_spec[:, :-1]
        lowerCAmelCase = log_spec - 20.0
        lowerCAmelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : Dict , lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , **lowerCAmelCase : Dict , ):
        # Featurize one waveform or a batch of waveforms into padded
        # spectrogram patches plus (optionally) an attention mask.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        # Normalise the input into a list of float32 column arrays.
        lowerCAmelCase = isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        lowerCAmelCase = is_batched_numpy or (
            isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
            lowerCAmelCase = np.asarray(lowerCAmelCase , dtype=np.floataa )
        elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        lowerCAmelCase = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , lowerCAmelCase ):
            lowerCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        lowerCAmelCase = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1s over real patches, 0s over the padded tail.
            lowerCAmelCase = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            lowerCAmelCase = np.array(lowerCAmelCase ).astype(np.floataa )
        # convert into correct format for padding
        lowerCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        lowerCAmelCase = np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        lowerCAmelCase = padded_audio_features * self.padding_value
        for i in range(len(lowerCAmelCase ) ):
            lowerCAmelCase = audio_features[i]
            lowerCAmelCase = feature
        # return as BatchFeature
        if return_attention_mask:
            lowerCAmelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            lowerCAmelCase = {"""audio_values""": padded_audio_features}
        lowerCAmelCase = BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
        return encoded_inputs
| 155 | 1 |
def UpperCAmelCase ( a , b ):
    """Return ``a ** b`` for a non-negative integer exponent ``b``.

    Exponentiation by squaring: the half power is computed once and squared,
    giving O(log b) multiplications.

    Fixes: the original declared both parameters with the same placeholder
    name (a SyntaxError), recursed through the undefined name
    ``actual_power``, and computed the half power twice per level.
    """
    if b == 0:
        return 1
    # Compute a ** (b // 2) once; squaring it covers the even case, and one
    # extra factor of ``a`` covers the odd case.
    half = UpperCAmelCase(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half
def UpperCAmelCase ( a , b ):
    """Return ``a ** b`` for any integer exponent, treating a negative
    exponent as ``1 / a ** |b|``.

    Fixes: the original declared both parameters with the same placeholder
    name (a SyntaxError) and delegated to the undefined name
    ``actual_power``; the exponentiation-by-squaring helper is inlined here
    so the function stands on its own.
    """

    def _actual_power(base, exp):
        # Exponentiation by squaring for a non-negative integer exponent.
        if exp == 0:
            return 1
        half = _actual_power(base, int(exp / 2))
        if (exp % 2) == 0:
            return half * half
        return base * half * half

    if b < 0:
        return 1 / _actual_power(a, -b)
    return _actual_power(a, b)
if __name__ == "__main__":
    # Exercise the signed-exponent power function defined above.
    # (The original called the undefined name ``power``.)
    print(UpperCAmelCase(-2, -3))
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCamelCase_ :
    """Abstract base for token streamers: the generation loop feeds new token
    values in and signals a final flush.

    NOTE(review): both methods below carry the same placeholder name
    ``SCREAMING_SNAKE_CASE__`` — only the second definition survives at
    class-creation time; the originals were presumably ``put`` and ``end``.
    """

    def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : Any ) -> Optional[Any]:
        # Receive one batch of newly generated token ids.
        raise NotImplementedError()

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[Any]:
        # Flush any buffered output at the end of generation.
        raise NotImplementedError()
class lowerCamelCase_ ( _A ):
    """Streamer that decodes generated tokens incrementally and prints text
    as soon as complete words or lines are available.

    NOTE(review): placeholder-name obfuscation — ``__init__`` declares every
    parameter as ``__lowerCamelCase`` (duplicate parameter names are a
    SyntaxError, and double-underscore names are mangled inside a class),
    and method bodies bind the throwaway name ``A`` while reading the
    intended ones (``tokenizer``, ``value``, ``text`` ...).  Real names must
    be restored before this class can run.
    """

    def __init__( self : Optional[Any] , __lowerCamelCase : "AutoTokenizer" , __lowerCamelCase : bool = False , **__lowerCamelCase : Optional[Any] ) -> Optional[int]:
        A : str = tokenizer
        A : Tuple = skip_prompt
        A : Optional[Any] = decode_kwargs
        # variables used in the streaming process
        A : Any = []
        A : Tuple = 0
        A : List[str] = True

    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> int:
        # Receive new token ids, decode the running cache and emit any text
        # that is safe to print (complete lines/words, or CJK characters).
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1" )
        elif len(value.shape ) > 1:
            A : List[str] = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            A : Tuple = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        A : str = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n" ):
            A : int = text[self.print_len :]
            A : Union[str, Any] = []
            A : Any = 0
        # If the last token is a CJK character, we print the characters.
        elif len(__lowerCamelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            A : Optional[int] = text[self.print_len :]
            self.print_len += len(__lowerCamelCase )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            A : Union[str, Any] = text[self.print_len : text.rfind(" " ) + 1]
            self.print_len += len(__lowerCamelCase )
        self.on_finalized_text(__lowerCamelCase )

    def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            A : Optional[int] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            A : Optional[Any] = text[self.print_len :]
            A : Optional[int] = []
            A : List[str] = 0
        else:
            A : List[Any] = ""
        A : Union[str, Any] = True
        self.on_finalized_text(__lowerCamelCase , stream_end=__lowerCamelCase )

    def SCREAMING_SNAKE_CASE__ ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> List[str]:
        # Default sink: print to stdout, adding a newline only at stream end.
        print(__lowerCamelCase , flush=__lowerCamelCase , end="" if not stream_end else None )

    def SCREAMING_SNAKE_CASE__ ( self : Any , __lowerCamelCase : int ) -> Dict:
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0X4_e_0_0 and cp <= 0X9_f_f_f)
            or (cp >= 0X3_4_0_0 and cp <= 0X4_d_b_f) #
            or (cp >= 0X2_0_0_0_0 and cp <= 0X2_a_6_d_f) #
            or (cp >= 0X2_a_7_0_0 and cp <= 0X2_b_7_3_f) #
            or (cp >= 0X2_b_7_4_0 and cp <= 0X2_b_8_1_f) #
            or (cp >= 0X2_b_8_2_0 and cp <= 0X2_c_e_a_f) #
            or (cp >= 0Xf_9_0_0 and cp <= 0Xf_a_f_f)
            or (cp >= 0X2_f_8_0_0 and cp <= 0X2_f_a_1_f) #
        ): #
            return True
        return False
class lowerCamelCase_ ( _A ):
    """Streamer variant that puts finished text on a queue so a consumer
    thread can iterate over the generated text.

    NOTE(review): placeholder renaming — ``__init__`` repeats the parameter
    name ``__lowerCamelCase`` (a SyntaxError) and bodies bind ``A`` while
    reading the intended attributes (``text_queue``, ``stop_signal``,
    ``timeout``); real names must be restored before use.
    """

    def __init__( self : Any , __lowerCamelCase : "AutoTokenizer" , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[float] = None , **__lowerCamelCase : Dict ) -> Any:
        super().__init__(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
        # Queue of finished text chunks plus a sentinel marking stream end.
        A : Tuple = Queue()
        A : Dict = None
        A : List[str] = timeout

    def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Any:
        # Push finished text — and the stop sentinel at stream end — onto the
        # queue for the consuming thread.
        self.text_queue.put(__lowerCamelCase , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )

    def __iter__( self : int ) -> Tuple:
        return self

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
        # Blocking "next": stop iterating when the sentinel arrives.
        A : int = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for the GroupViT model family: heavy submodules are
# only imported on first attribute access.
#
# NOTE(review): the placeholder renaming collapsed what were three distinct
# bindings (the import-structure dict and two conditional list insertions
# into it) onto the single name ``UpperCAmelCase__``, and the final
# ``_LazyModule`` call reads ``_import_structure``, which is undefined here —
# the original names need restoring for lazy loading to work.
UpperCAmelCase__ = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# Register the PyTorch implementations only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# Register the TensorFlow implementations only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

# Under static type checking, import everything eagerly so annotations resolve.
if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 |
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# Browser-like User-Agent header so Google serves the full search page
# instead of a reduced bot-facing one.
__UpperCamelCase : Dict = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _a ( SCREAMING_SNAKE_CASE : str = "dhaka" , SCREAMING_SNAKE_CASE : int = 5 ):
    """Scrape Google Images for a query and download up to ``max_images``
    full-resolution results into a ``query_<query>`` directory; returns the
    number of images downloaded (0 when the results page could not be parsed).

    NOTE(review): placeholder renaming — both parameters are declared as
    ``SCREAMING_SNAKE_CASE`` (duplicate parameter names are a SyntaxError)
    and the body binds ``UpperCamelCase__`` while reading the original local
    names (``query``, ``html``, ``soup``, ``max_images`` ...).  Restore the
    real names before use.
    """
    UpperCamelCase__ : Optional[int] = min(SCREAMING_SNAKE_CASE , 50 )  # Prevent abuse!
    # Query parameters for a Google image ("isch") search.
    UpperCamelCase__ : str = {
        '''q''': query,
        '''tbm''': '''isch''',
        '''hl''': '''en''',
        '''ijn''': '''0''',
    }
    UpperCamelCase__ : List[str] = requests.get('''https://www.google.com/search''' , params=SCREAMING_SNAKE_CASE , headers=SCREAMING_SNAKE_CASE )
    UpperCamelCase__ : Any = BeautifulSoup(html.text , '''html.parser''' )
    # Pull the inline JS payload that carries the image grid data.
    UpperCamelCase__ : Union[str, Any] = ''''''.join(
        re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
    # Round-trip through JSON to unescape the embedded payload.
    UpperCamelCase__ : Optional[Any] = json.dumps(SCREAMING_SNAKE_CASE )
    UpperCamelCase__ : List[Any] = json.loads(SCREAMING_SNAKE_CASE )
    UpperCamelCase__ : List[str] = re.findall(
        r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , SCREAMING_SNAKE_CASE , )
    if not matched_google_image_data:
        return 0
    # Strip the low-res thumbnail URLs, keeping only full-resolution links.
    UpperCamelCase__ : Optional[Any] = re.sub(
        r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(SCREAMING_SNAKE_CASE ) , )
    UpperCamelCase__ : List[str] = re.findall(
        r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , SCREAMING_SNAKE_CASE , )
    for index, fixed_full_res_image in enumerate(SCREAMING_SNAKE_CASE ):
        if index >= max_images:
            return index
        # Undo the double escaping applied inside the page's JS payload.
        UpperCamelCase__ : Optional[int] = bytes(SCREAMING_SNAKE_CASE , '''ascii''' ).decode(
            '''unicode-escape''' )
        UpperCamelCase__ : List[Any] = bytes(SCREAMING_SNAKE_CASE , '''ascii''' ).decode(
            '''unicode-escape''' )
        # Install an opener that presents a browser-like User-Agent.
        UpperCamelCase__ : List[Any] = urllib.request.build_opener()
        UpperCamelCase__ : Optional[Any] = [
            (
                '''User-Agent''',
                '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
                ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
            )
        ]
        urllib.request.install_opener(SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Dict = F"query_{query.replace(' ' , '_' )}"
        if not os.path.exists(SCREAMING_SNAKE_CASE ):
            os.makedirs(SCREAMING_SNAKE_CASE )
        urllib.request.urlretrieve( # noqa: S310
            SCREAMING_SNAKE_CASE , F"{path_name}/original_size_img_{index}.jpg" )
    return index
if __name__ == "__main__":
    try:
        # NOTE(review): ``download_images_from_google_query`` and
        # ``image_count`` are not defined in this file after the placeholder
        # renaming (the scraper above is named ``_a``); restore the names
        # before running as a script.
        __UpperCamelCase : List[Any] = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 146 | 0 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants (train / eval).
# NOTE(review): both bind the same placeholder name ``lowercase__``, so the
# second assignment overwrites the first — the original distinct names need
# restoring.
lowercase__ =16
lowercase__ =32
def __UpperCamelCase ( lowerCAmelCase__ : Dict ):
    """Convert a size in bytes to whole mebibytes (truncating).

    Fixes: the original body read the undefined name ``x`` instead of the
    declared parameter.
    """
    return int(lowerCAmelCase__ / 2**2_0)
class UpperCamelCase__ :
    """Context manager that tracks CUDA memory allocated inside its block.

    On exit it records, in MB:
      * ``used``   — net allocation delta (end - begin)
      * ``peaked`` — peak allocation above the starting point

    Fixes: the original bound its measurements to the throwaway local
    ``__a`` instead of the ``self`` attributes the exit handler reads, and
    called the undefined name ``bamb`` (the byte->MB conversion is inlined
    here, since a double-underscore module helper would be name-mangled
    inside a class body anyway).
    """

    def __enter__(self : str ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self : Optional[int] , *snake_case_ : int ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        # Inlined byte -> MB conversion (see class docstring).
        self.used = int((self.end - self.begin) / 2**2_0)
        self.peaked = int((self.peak - self.begin) / 2**2_0)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __UpperCamelCase ( accelerator : "Accelerator" , batch_size : int = 1_6 , model_name : str = "bert-base-cased" , n_train : int = 3_2_0 , n_val : int = 1_6_0 , ):
    """Build MRPC train/eval dataloaders for the given tokenizer checkpoint.

    Returns ``(train_dataloader, eval_dataloader)`` over the first
    ``n_train`` / ``n_val`` GLUE-MRPC examples, tokenized and collated with
    dynamic (or TPU max-length) padding.

    Fixes: the original declared all five parameters with the same
    placeholder name (a SyntaxError) and bound every intermediate to the
    throwaway local ``__a`` while reading the intended names
    (``tokenizer``, ``datasets``, ``tokenized_datasets`` ...).  Restored
    names are taken from those references.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue' , 'mrpc' , split={'train': f"train[:{n_train}]", 'validation': f"validation[:{n_val}]"} )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders (shuffle only the training split).
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple ):
    """Training loop that tracks peak GPU memory per epoch (accelerate example).

    NOTE(review): placeholder renaming — both parameters share the name
    ``lowerCAmelCase__`` (a SyntaxError), intermediates are bound to the
    throwaway ``__a`` while read under the intended names (``config``,
    ``args``, ``model``, ``optimizer`` ...), and ``get_dataloaders``,
    ``TorchTracemalloc`` and ``bamb`` are not defined under those names in
    this module.  The real names must be restored before this can run.
    """
    # Initialize accelerator
    __a : List[Any] = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    __a : List[Any] = config['''lr''']
    __a : str = int(config['''num_epochs'''] )
    __a : List[str] = int(config['''seed'''] )
    __a : Tuple = int(config['''batch_size'''] )
    __a : int = args.model_name_or_path
    set_seed(lowerCAmelCase__ )
    __a , __a : int = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    __a : Dict = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
    # Instantiate optimizer — a dummy placeholder when DeepSpeed supplies its own.
    __a : Tuple = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    __a : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase__ )
    if accelerator.state.deepspeed_plugin is not None:
        __a : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        __a : List[str] = 1
    __a : Tuple = (len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler — again a dummy when DeepSpeed manages it.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        __a : Any = get_linear_schedule_with_warmup(
            optimizer=lowerCAmelCase__ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase__ , )
    else:
        __a : List[str] = DummyScheduler(lowerCAmelCase__ , total_num_steps=lowerCAmelCase__ , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    __a , __a , __a , __a , __a : List[Any] = accelerator.prepare(
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    # We need to keep track of how many total steps we have iterated over
    __a : List[Any] = 0
    # We also need to keep track of the stating epoch so files are named properly
    __a : List[Any] = 0
    # Now we train the model
    __a : Dict = {}
    for epoch in range(lowerCAmelCase__ , lowerCAmelCase__ ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(lowerCAmelCase__ ):
                __a : Union[str, Any] = model(**lowerCAmelCase__ )
                __a : Optional[int] = outputs.loss
                __a : Dict = loss / gradient_accumulation_steps
                accelerator.backward(lowerCAmelCase__ )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
        accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
        accelerator.print(
            '''Total Peak Memory consumed during the train (max): {}'''.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        __a : List[str] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
            json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def __UpperCamelCase ( ):
    """CLI entry point: parse the script arguments and launch training.

    NOTE(review): placeholder renaming — the parser is bound to ``__a`` but
    read as ``parser``/``args``, and ``training_function`` is not defined
    under that name in this module; restore the real names before running.
    """
    __a : List[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=lowerCAmelCase__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowerCAmelCase__ , )
    parser.add_argument(
        '''--output_dir''' , type=lowerCAmelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--peak_memory_upper_bound''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
    parser.add_argument(
        '''--n_train''' , type=lowerCAmelCase__ , default=3_2_0 , help='''Number of training examples to use.''' , )
    parser.add_argument(
        '''--n_val''' , type=lowerCAmelCase__ , default=1_6_0 , help='''Number of validation examples to use.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=lowerCAmelCase__ , default=1 , help='''Number of train epochs.''' , )
    __a : Optional[int] = parser.parse_args()
    # Fixed hyper-parameters except for the epoch count taken from the CLI.
    __a : Tuple = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6}
    training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this module (the entry point
    # above is named ``__UpperCamelCase``); restore the name before running.
    main()
| 90 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test: checks google/mt5-small's LM loss on one example.

    Fixes: the original referenced the undefined placeholder ``snake_case_``
    (in place of ``torch_device`` and ``return_dict=True``) and bound every
    intermediate to the throwaway local ``__a`` while reading the intended
    names (``model``, ``tokenizer``, ``labels`` ...).
    """

    @slow
    def lowerCAmelCase (self : Tuple ):
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        # Sequence log-likelihood implied by the mean per-token loss.
        mtf_score = -(labels.shape[-1] * loss.item())
        # Reference score from the original Mesh-TensorFlow implementation.
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 90 | 1 |
"""simple docstring"""
from math import pow, sqrt
def __lowercase(*values: float) -> bool:
    """Return True iff at least one value was supplied and all are positive.

    NOTE(review): the original bound its result to a mangled throwaway name
    and then returned the undefined name ``result``; fixed to return the
    computed boolean. The varargs parameter is renamed (not caller-visible).
    """
    return len(values) > 0 and all(value > 0.0 for value in values)
def __lowercase ( snake_case_ : float ,snake_case_ : float ) ->float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) ,6 )
if validate(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else ValueError('''Input Error: Molar mass values must greater than 0.''' )
)
def __lowercase ( snake_case_ : float ,snake_case_ : float ,snake_case_ : float ) ->float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) ,6 )
if validate(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def __lowercase ( snake_case_ : float ,snake_case_ : float ,snake_case_ : float ) ->float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) ,6 )
if validate(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def __lowercase ( snake_case_ : float ,snake_case_ : float ,snake_case_ : float ) ->float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a ,2 ) ,6 )
if validate(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def __lowercase ( snake_case_ : float ,snake_case_ : float ,snake_case_ : float ) ->float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a ,2 ) / molar_mass ,6 )
if validate(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
| 179 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A ( _UpperCAmelCase ):
    """Test-suite for ``DPMSolverSinglestepScheduler``.

    NOTE(review): this file appears machine-mangled — every local is assigned
    to the throwaway name ``A__``, both class attributes share one name (the
    second shadows the first), several signatures declare ``lowercase_``
    twice (a SyntaxError), and later references (``config``, ``kwargs``,
    ``time_step``, ``scheduler``, ``output`` ...) are unbound.  The code is
    kept byte-identical here; only documentation was added.  Confirm against
    the upstream diffusers test file before relying on any method.
    """

    # NOTE(review): both attributes bind the same name; the tuple of
    # scheduler classes is immediately shadowed by the forward kwargs.
    lowerCamelCase = (DPMSolverSinglestepScheduler,)
    lowerCamelCase = (('num_inference_steps', 25),)
    def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Optional[int]:
        """Build the default scheduler config dict, then apply overrides.

        NOTE(review): the dict is bound to ``A__`` but updated/returned as
        ``config`` — NameError at runtime as written.
        """
        A__ = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.0_001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float('inf' ),
            'variance_type': None,
        }
        config.update(**lowercase_ )
        return config
    def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]:
        """Save/reload a scheduler config and check stepping is unchanged.

        NOTE(review): signature declares ``lowercase_`` twice (SyntaxError);
        ``kwargs``/``time_step`` and most stepped values are unbound.
        """
        A__ = dict(self.forward_default_kwargs )
        A__ = kwargs.pop('num_inference_steps',lowercase_ )
        A__ = self.dummy_sample
        A__ = 0.1 * sample
        A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            A__ = self.get_scheduler_config(**lowercase_ )
            A__ = scheduler_class(**lowercase_ )
            scheduler.set_timesteps(lowercase_ )
            # copy over dummy past residuals
            A__ = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase_ )
                A__ = scheduler_class.from_pretrained(lowercase_ )
                new_scheduler.set_timesteps(lowercase_ )
                # copy over dummy past residuals
                A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
            A__ , A__ = sample, sample
            for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ):
                A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample
                A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def snake_case__ ( self : List[str] )-> List[Any]:
        """Intentionally disabled base-class check (no-op)."""
        pass
    def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]:
        """Save/reload round-trip using default config, compare one step.

        NOTE(review): same mangling issues as the method above.
        """
        A__ = dict(self.forward_default_kwargs )
        A__ = kwargs.pop('num_inference_steps',lowercase_ )
        A__ = self.dummy_sample
        A__ = 0.1 * sample
        A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            A__ = self.get_scheduler_config()
            A__ = scheduler_class(**lowercase_ )
            scheduler.set_timesteps(lowercase_ )
            # copy over dummy past residuals (must be after setting timesteps)
            A__ = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase_ )
                A__ = scheduler_class.from_pretrained(lowercase_ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowercase_ )
                # copy over dummy past residual (must be after setting timesteps)
                A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
            A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample
            A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int]=None,**lowercase_ : int )-> int:
        """Run a full 10-step denoising loop and return the final sample.

        NOTE(review): duplicate ``lowercase_`` parameter (SyntaxError); the
        ``if scheduler is None`` guard references names never bound here.
        """
        if scheduler is None:
            A__ = self.scheduler_classes[0]
            A__ = self.get_scheduler_config(**lowercase_ )
            A__ = scheduler_class(**lowercase_ )
        A__ = self.scheduler_classes[0]
        A__ = self.get_scheduler_config(**lowercase_ )
        A__ = scheduler_class(**lowercase_ )
        A__ = 1_0
        A__ = self.dummy_model()
        A__ = self.dummy_sample_deter
        scheduler.set_timesteps(lowercase_ )
        for i, t in enumerate(scheduler.timesteps ):
            A__ = model(lowercase_,lowercase_ )
            A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample
        return sample
    def snake_case__ ( self : Any )-> str:
        """50-step loop skipping the first timesteps; check the result mean."""
        A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        A__ = 5_0
        A__ = self.dummy_model()
        A__ = self.dummy_sample_deter
        scheduler.set_timesteps(lowercase_ )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            A__ = model(lowercase_,lowercase_ )
            A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample
        A__ = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_mean.item() - 0.2_574 ) < 1E-3
    def snake_case__ ( self : Optional[Any] )-> List[Any]:
        """Sweep num_train_timesteps values through check_over_configs."""
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=lowercase_ )
    def snake_case__ ( self : int )-> Optional[Any]:
        """Convert the config across scheduler classes and re-check the loop."""
        A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        A__ = self.full_loop(scheduler=lowercase_ )
        A__ = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1E-3
        A__ = DEISMultistepScheduler.from_config(scheduler.config )
        A__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
        A__ = UniPCMultistepScheduler.from_config(scheduler.config )
        A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        A__ = self.full_loop(scheduler=lowercase_ )
        A__ = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1E-3
    def snake_case__ ( self : Tuple )-> Any:
        """Sweep thresholding-related config combinations."""
        self.check_over_configs(thresholding=lowercase_ )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,)
    def snake_case__ ( self : List[Any] )-> int:
        """Sweep supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowercase_ )
    def snake_case__ ( self : Dict )-> List[Any]:
        """Sweep solver configurations and assert no NaNs in the output."""
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,)
                        A__ = self.full_loop(
                            solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,)
                        assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers"
    def snake_case__ ( self : Optional[int] )-> Tuple:
        """Check the lower_order_final flag (both calls pass the same value)."""
        self.check_over_configs(lower_order_final=lowercase_ )
        self.check_over_configs(lower_order_final=lowercase_ )
    def snake_case__ ( self : Tuple )-> Optional[int]:
        """Check lambda_min_clipped values."""
        self.check_over_configs(lambda_min_clipped=-float('inf' ) )
        self.check_over_configs(lambda_min_clipped=-5.1 )
    def snake_case__ ( self : Optional[Any] )-> Tuple:
        """Check variance_type values."""
        self.check_over_configs(variance_type=lowercase_ )
        self.check_over_configs(variance_type='learned_range' )
    def snake_case__ ( self : str )-> Any:
        """Sweep num_inference_steps through check_over_forward."""
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=lowercase_,time_step=0 )
    def snake_case__ ( self : Tuple )-> Tuple:
        """Default full loop; check the mean of the final sample."""
        A__ = self.full_loop()
        A__ = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1E-3
    def snake_case__ ( self : Any )-> Union[str, Any]:
        """Full loop with Karras sigmas; check the mean of the final sample."""
        A__ = self.full_loop(use_karras_sigmas=lowercase_ )
        A__ = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_mean.item() - 0.2_248 ) < 1E-3
    def snake_case__ ( self : Union[str, Any] )-> Tuple:
        """Full loop with v_prediction; check the mean of the final sample."""
        A__ = self.full_loop(prediction_type='v_prediction' )
        A__ = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_mean.item() - 0.1_453 ) < 1E-3
    def snake_case__ ( self : Tuple )-> int:
        """Full loop with v_prediction + Karras sigmas; check the mean."""
        A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ )
        A__ = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_mean.item() - 0.0_649 ) < 1E-3
    def snake_case__ ( self : List[Any] )-> int:
        """Run the loop in fp16 and check the output dtype is preserved."""
        A__ = self.scheduler_classes[0]
        A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 )
        A__ = scheduler_class(**lowercase_ )
        A__ = 1_0
        A__ = self.dummy_model()
        A__ = self.dummy_sample_deter.half()
        scheduler.set_timesteps(lowercase_ )
        for i, t in enumerate(scheduler.timesteps ):
            A__ = model(lowercase_,lowercase_ )
            A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample
        # ``torch.floataa`` is presumably a mangled ``torch.float16`` — confirm
        assert sample.dtype == torch.floataa
| 7 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__(config, base_model=False):
    """Build the (timm_key, hf_key) rename table for a ViT checkpoint.

    NOTE(review): the original declared the same mangled name for both
    parameters (a SyntaxError) and never bound ``rename_keys``; the body
    already referred to ``config``/``base_model``, so those names and the
    accumulator are restored.  The broken ``-> Any`` annotation (``Any`` is
    not imported here) is dropped.

    :param config: ViT config; only ``num_hidden_layers`` is read.
    :param base_model: True -> bare ``ViTModel`` target (no "vit." prefix,
        pooler kept); False -> ``ViTForImageClassification`` target.
    :return: list of (old_key, new_key) tuples.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def lowerCAmelCase__(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF q/k/v entries.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.*`` and
    writes ``[vit.]encoder.layer.{i}.attention.attention.{query,key,value}.*``.

    NOTE(review): the original had duplicate mangled parameter names (a
    SyntaxError) and its write-back assignments were mangled away; the
    target key names follow the HF ViT naming convention — confirm against
    the model's state dict. The broken ``-> Dict`` annotation is dropped.
    """
    for i in range(config.num_hidden_layers):
        # a bare ViTModel carries no "vit." prefix on its keys
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_):
    """Drop the timm classification head entries from a state dict (in place).

    NOTE(review): the original bound the key list to a mangled name and then
    iterated the undefined ``ignore_keys`` with undefined pop arguments;
    fixed here. The wrong ``-> str`` annotation is dropped (returns None).

    :param SCREAMING_SNAKE_CASE_: state dict to mutate (name kept for
        keyword-compatibility with the original signature).
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # pop with a default so a headless checkpoint does not raise KeyError
        SCREAMING_SNAKE_CASE_.pop(k, None)
def lowerCAmelCase__(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (in place).

    NOTE(review): the original declared three identically-named parameters
    (a SyntaxError) and never wrote the popped value back into the dict;
    restored here. The broken ``-> Any`` annotation is dropped.
    """
    val = dct.pop(old)
    dct[new] = val
def lowerCAmelCase__():
    """Fetch the standard COCO cats image used to sanity-check conversions.

    NOTE(review): the original bound the URL to a mangled name and then
    passed the undefined ``SCREAMING_SNAKE_CASE_`` to ``requests.get`` —
    the local ``url`` is restored, and ``stream=True`` (presumably the
    mangled value) lets PIL read from the raw response. The broken
    ``-> List[Any]`` annotation is dropped.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
    # NOTE(review): heavily machine-mangled. The signature declares the same
    # name for both parameters (a SyntaxError as written; presumably
    # ``vit_name`` and ``pytorch_dump_folder_path``, which the body uses),
    # every local is rebound to ``lowerCAmelCase__`` so later references
    # (``vit_name``, ``base_model``, ``idalabel``, ``timm_model``, ``model``,
    # ``image_processor``, ``encoding``, ``outputs`` ...) are unbound, and
    # ``SCREAMING_SNAKE_CASE_`` is passed everywhere an argument was lost.
    # Kept byte-identical; reconstruct from the upstream conversion script.
    lowerCAmelCase__ : int = ViTConfig()
    lowerCAmelCase__ : List[str] = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        lowerCAmelCase__ : Tuple = True
        lowerCAmelCase__ : Optional[int] = int(vit_name[-12:-10] )
        lowerCAmelCase__ : List[Any] = int(vit_name[-9:-6] )
    else:
        lowerCAmelCase__ : int = 1_000
        lowerCAmelCase__ : Union[str, Any] = 'huggingface/label-files'
        lowerCAmelCase__ : List[Any] = 'imagenet-1k-id2label.json'
        lowerCAmelCase__ : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
        lowerCAmelCase__ : Tuple = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
        lowerCAmelCase__ : Optional[Any] = idalabel
        lowerCAmelCase__ : Optional[int] = {v: k for k, v in idalabel.items()}
        lowerCAmelCase__ : Union[str, Any] = int(vit_name[-6:-4] )
        lowerCAmelCase__ : int = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny' ):
            lowerCAmelCase__ : Dict = 192
            lowerCAmelCase__ : List[Any] = 768
            lowerCAmelCase__ : Union[str, Any] = 12
            lowerCAmelCase__ : Union[str, Any] = 3
        elif vit_name[9:].startswith('small' ):
            lowerCAmelCase__ : int = 384
            lowerCAmelCase__ : List[str] = 1_536
            lowerCAmelCase__ : Any = 12
            lowerCAmelCase__ : int = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small' ):
            lowerCAmelCase__ : Dict = 768
            lowerCAmelCase__ : List[str] = 2_304
            lowerCAmelCase__ : Optional[int] = 8
            lowerCAmelCase__ : Union[str, Any] = 8
        elif vit_name[4:].startswith('base' ):
            pass
        elif vit_name[4:].startswith('large' ):
            lowerCAmelCase__ : Optional[Any] = 1_024
            lowerCAmelCase__ : Dict = 4_096
            lowerCAmelCase__ : Any = 24
            lowerCAmelCase__ : Union[str, Any] = 16
        elif vit_name[4:].startswith('huge' ):
            lowerCAmelCase__ : int = 1_280
            lowerCAmelCase__ : List[str] = 5_120
            lowerCAmelCase__ : Tuple = 32
            lowerCAmelCase__ : List[str] = 16
    # load original model from timm
    lowerCAmelCase__ : Any = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    lowerCAmelCase__ : str = timm_model.state_dict()
    if base_model:
        remove_classification_head_(SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase__ : Optional[Any] = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    for src, dest in rename_keys:
        rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        lowerCAmelCase__ : Tuple = ViTModel(SCREAMING_SNAKE_CASE_ ).eval()
    else:
        lowerCAmelCase__ : Union[str, Any] = ViTForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        lowerCAmelCase__ : str = DeiTImageProcessor(size=config.image_size )
    else:
        lowerCAmelCase__ : List[Any] = ViTImageProcessor(size=config.image_size )
    lowerCAmelCase__ : str = image_processor(images=prepare_img() , return_tensors='pt' )
    lowerCAmelCase__ : List[Any] = encoding['pixel_values']
    lowerCAmelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ )
    if base_model:
        lowerCAmelCase__ : Optional[int] = timm_model.forward_features(SCREAMING_SNAKE_CASE_ )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.pooler_output , atol=1e-3 )
    else:
        lowerCAmelCase__ : Optional[int] = timm_model(SCREAMING_SNAKE_CASE_ )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
    Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
    print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(SCREAMING_SNAKE_CASE_ )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    # NOTE(review): the original bound both the parser and the parsed args to
    # the same mangled module name, so ``parser.add_argument`` and ``args``
    # raised NameError; proper local names are restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Load the small Lena test image once at import time.  NOTE(review): the
# second assignment originally referenced an undefined name ``img`` (both
# globals had been mangled to one name); ``img`` now holds the BGR source
# and ``lowerCamelCase__`` keeps the grayscale copy the original left bound.
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
    """Smoke-test ``cn.convert_to_negative``.

    NOTE(review): names are machine-mangled — ``SCREAMING_SNAKE_CASE_`` is
    unbound (presumably the module-level color image) and the result is
    bound to a throwaway name while the assert reads ``negative_img``, so
    this raises NameError as written; confirm the intended globals.
    """
    lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
    # assert negative_img array for at least one True
    assert negative_img.any()
def lowerCAmelCase__():
    """Smoke-test ``cc.change_contrast`` on the small Lena image.

    NOTE(review): the original passed the undefined ``SCREAMING_SNAKE_CASE_``
    where the ``with``-bound local ``img`` belonged; fixed. The broken
    ``-> Optional[Any]`` annotation (``Optional`` not imported) is dropped.
    """
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')
def lowerCAmelCase__():
    """``gen_gaussian_kernel`` should return an all-nonzero 9x9 kernel.

    NOTE(review): the original bound the kernel to a mangled name and then
    asserted on the undefined ``resp``; fixed. The broken ``-> Tuple``
    annotation is dropped.
    """
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def lowerCAmelCase__():
    """Run the Canny edge detector end-to-end on the grayscale Lena image.

    NOTE(review): both locals were mangled away (``canny_img`` and
    ``canny_array`` were read but never bound, and ``canny.canny`` received
    the undefined ``SCREAMING_SNAKE_CASE_``); fixed. The broken ``-> Tuple``
    annotation is dropped.
    """
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
    """Smoke-test ``gg.gaussian_filter``.

    NOTE(review): ``SCREAMING_SNAKE_CASE_`` is unbound (presumably the
    module-level grayscale image); raises NameError as written.
    """
    assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
    """Smoke-test convolution with a Laplacian-style kernel.

    NOTE(review): the kernel is bound to a throwaway mangled name, all
    three ``img_convolve``/``astype`` arguments are the undefined
    ``SCREAMING_SNAKE_CASE_`` (presumably the gray image, the kernel and a
    dtype), and the assert reads the unbound ``res``; raises NameError.
    """
    # laplace diagonals
    lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
    assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
    """Smoke-test ``med.median_filter``.

    NOTE(review): ``SCREAMING_SNAKE_CASE_`` is unbound (presumably the
    module-level grayscale image); raises NameError as written.
    """
    assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
    """Smoke-test ``sob.sobel_filter``.

    NOTE(review): the returned pair is unpacked into mangled throwaway
    names while the assert reads ``grad``/``theta``, and the filter input
    ``SCREAMING_SNAKE_CASE_`` is unbound; raises NameError as written.
    """
    lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
    assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
    """Smoke-test ``sp.make_sepia``.

    NOTE(review): the result is bound to a mangled throwaway name while the
    assert reads ``sepia``, and the input ``SCREAMING_SNAKE_CASE_`` is
    unbound (presumably the color image); raises NameError as written.
    """
    lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
    assert sepia.all()
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_="digital_image_processing/image_data/lena_small.jpg"):
    """Burkes-dithering smoke test.

    NOTE(review): the original bound the Burkes object to a mangled name
    and then called ``burkes.process()`` on the undefined ``burkes``;
    fixed. The parameter name is kept for keyword-compatibility; the
    broken ``-> Optional[Any]`` annotation is dropped.
    """
    burkes = bs.Burkes(imread(SCREAMING_SNAKE_CASE_, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_="digital_image_processing/image_data/lena_small.jpg"):
    """Nearest-neighbour resize smoke test (400x200 output).

    NOTE(review): the original bound the resizer to a mangled name and then
    called ``nn.process()`` on the undefined ``nn``; fixed. The parameter
    name is kept for keyword-compatibility; the broken ``-> Any``
    annotation is dropped.
    """
    nn = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def lowerCAmelCase__():
    """Exercise the local-binary-pattern helpers on the full Lena image.

    NOTE(review): every local here was mangled to one throwaway name while
    later lines read ``image``, ``x_coordinate``, ``y_coordinate``,
    ``neighbors_pixels`` and ``lbp_image``; all are locals, so they are
    restored below. The broken ``-> int`` annotation is dropped (returns
    None).
    """
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
A_ : int = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
A_ : str = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
A_ : Optional[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a (datasets.Metric ):
    """ROUGE metric wrapper around Google Research's ``rouge_score`` package.

    NOTE(review): the module-level docstring constants referenced by the
    decorator appear mangled elsewhere in this file (bound to ``A_``), so
    ``_DESCRIPTION``/``_KWARGS_DESCRIPTION``/``_CITATION`` may be unbound at
    import time — confirm. The compute method's parameters were all
    declared with one duplicated mangled name (a SyntaxError); distinct
    names matching the documented metric signature are restored.
    """

    def __A(self):
        """Return the MetricInfo describing this metric's inputs and citations."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def __A(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """Compute ROUGE scores for paired prediction/reference strings.

        :param predictions: list of predicted strings.
        :param references: list of reference strings (same length).
        :param rouge_types: rouge variants to compute (defaults to
            rouge1/rouge2/rougeL/rougeLsum).
        :param use_aggregator: aggregate with bootstrap resampling if True,
            otherwise return per-example score lists.
        :param use_stemmer: apply a Porter stemmer before scoring.
        """
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            # transpose list-of-dicts into dict-of-lists keyed by rouge type
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 192 |
def UpperCamelCase(lowercase_: int = 10) -> str:
    """Last ``lowercase_`` digits of 28433 * 2**7830457 + 1 (Project Euler 97).

    NOTE(review): the original checked ``isinstance(x, x)`` (TypeError),
    used the input itself as the ``pow`` modulus, and read the unbound
    names ``number``/``modulus``; all restored here.

    :param lowercase_: number of trailing digits to return (non-negative int).
    :raises ValueError: if the input is not a non-negative integer.
    """
    if not isinstance(lowercase_, int) or lowercase_ < 0:
        raise ValueError("Invalid input")
    modulus = 10**lowercase_
    # modular exponentiation keeps the enormous power tractable
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # The solver in this module is named ``UpperCamelCase`` — the original
    # printed ``solution(10)``, which is undefined here.
    print(f"{UpperCamelCase(10) = }")
| 192 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase ( a_ , a_ , unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionPanoramaPipeline`` built from tiny dummy components.

    NOTE(review): automated-renaming artifacts are visible throughout this
    class — every method is named ``__A`` (so later definitions shadow earlier
    ones) and local assignments all target ``lowerCamelCase`` while later
    statements read the intended names (``sd_pipe``, ``image_slice``, ...).
    Confirm against the upstream diffusers test module before relying on it.
    """

    UpperCamelCase : Dict = StableDiffusionPanoramaPipeline
    UpperCamelCase : List[Any] = TEXT_TO_IMAGE_PARAMS
    UpperCamelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
    UpperCamelCase : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
    UpperCamelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS

    def __A ( self ) -> Dict:
        """Build the dict of tiny, seeded pipeline components (safety checker disabled)."""
        torch.manual_seed(0 )
        lowerCamelCase = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        lowerCamelCase = DDIMScheduler()
        torch.manual_seed(0 )
        lowerCamelCase = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowerCamelCase = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        lowerCamelCase = CLIPTextModel(A )
        lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        lowerCamelCase = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def __A ( self , A , A=0 ) -> Optional[Any]:
        """Return deterministic call kwargs for the pipeline (seeded generator, 1 step)."""
        lowerCamelCase = torch.manual_seed(A )
        lowerCamelCase = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            """height""": None,
            """width""": None,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def __A ( self ) -> List[str]:
        """Smoke-test the default pipeline: output shape and a 3x3 pixel slice."""
        lowerCamelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase = self.get_dummy_components()
        lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
        lowerCamelCase = sd_pipe.to(A )
        sd_pipe.set_progress_bar_config(disable=A )
        lowerCamelCase = self.get_dummy_inputs(A )
        lowerCamelCase = sd_pipe(**A ).images
        lowerCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self ) -> List[Any]:
        """Delegate to the shared batch-consistency check for batch sizes 1 and 2."""
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )

    def __A ( self ) -> Optional[Any]:
        """Delegate to the shared single-vs-batched equivalence check."""
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )

    def __A ( self ) -> str:
        """Exercise the negative-prompt path and check the resulting pixel slice."""
        lowerCamelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase = self.get_dummy_components()
        lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
        lowerCamelCase = sd_pipe.to(A )
        sd_pipe.set_progress_bar_config(disable=A )
        lowerCamelCase = self.get_dummy_inputs(A )
        lowerCamelCase = """french fries"""
        lowerCamelCase = sd_pipe(**A , negative_prompt=A )
        lowerCamelCase = output.images
        lowerCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self ) -> Tuple:
        """Exercise the multi-view batching path (``view_batch_size=2``)."""
        lowerCamelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase = self.get_dummy_components()
        lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
        lowerCamelCase = sd_pipe.to(A )
        sd_pipe.set_progress_bar_config(disable=A )
        lowerCamelCase = self.get_dummy_inputs(A )
        lowerCamelCase = sd_pipe(**A , view_batch_size=2 )
        lowerCamelCase = output.images
        lowerCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self ) -> List[Any]:
        """Run the pipeline with ``EulerAncestralDiscreteScheduler`` and check a pixel slice."""
        lowerCamelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase = self.get_dummy_components()
        lowerCamelCase = EulerAncestralDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
        lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
        lowerCamelCase = sd_pipe.to(A )
        sd_pipe.set_progress_bar_config(disable=A )
        lowerCamelCase = self.get_dummy_inputs(A )
        lowerCamelCase = sd_pipe(**A ).images
        lowerCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self ) -> int:
        """Run the pipeline with ``PNDMScheduler`` (prk steps skipped) and check a pixel slice."""
        lowerCamelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase = self.get_dummy_components()
        lowerCamelCase = PNDMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=A )
        lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
        lowerCamelCase = sd_pipe.to(A )
        sd_pipe.set_progress_bar_config(disable=A )
        lowerCamelCase = self.get_dummy_inputs(A )
        lowerCamelCase = sd_pipe(**A ).images
        lowerCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
    """Slow GPU integration tests for the panorama pipeline with real stable-diffusion-2-base weights.

    NOTE(review): same automated-renaming artifacts as above — every method is
    named ``__A`` and assignments target ``lowerCamelCase`` while later lines
    read the intended names.  Confirm against the upstream test module.
    """

    def __A ( self ) -> List[Any]:
        """Release Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __A ( self , A=0 ) -> Dict:
        """Return deterministic call kwargs (seeded generator, 3 inference steps)."""
        lowerCamelCase = torch.manual_seed(A )
        lowerCamelCase = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def __A ( self ) -> Union[str, Any]:
        """End-to-end run with DDIM; check the 512x2048 output and a reference pixel slice."""
        lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
        lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
        lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        pipe.enable_attention_slicing()
        lowerCamelCase = self.get_inputs()
        lowerCamelCase = pipe(**A ).images
        lowerCamelCase = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        lowerCamelCase = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-2

    def __A ( self ) -> Dict:
        """End-to-end run with ``LMSDiscreteScheduler``; check the output pixel slice."""
        lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-base""" , safety_checker=A )
        lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        pipe.enable_attention_slicing()
        lowerCamelCase = self.get_inputs()
        lowerCamelCase = pipe(**A ).images
        lowerCamelCase = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        # NOTE(review): the expected slice is all zeros — verify this is intentional upstream.
        lowerCamelCase = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3

    def __A ( self ) -> int:
        """Check the per-step callback runs and spot-check intermediate latents."""
        lowerCamelCase = 0

        def callback_fn(A , A , A ) -> None:
            # Record that the callback fired; compare latent slices at steps 1 and 2.
            lowerCamelCase = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                lowerCamelCase = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                lowerCamelCase = latents[0, -3:, -3:, -1]
                lowerCamelCase = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                lowerCamelCase = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                lowerCamelCase = latents[0, -3:, -3:, -1]
                lowerCamelCase = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        lowerCamelCase = False
        lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
        lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
        lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
        lowerCamelCase = pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        pipe.enable_attention_slicing()
        lowerCamelCase = self.get_inputs()
        pipe(**A , callback=A , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def __A ( self ) -> str:
        """Check peak GPU memory stays under budget with attention slicing + CPU offload."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
        lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
        lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
        lowerCamelCase = pipe.to(A )
        pipe.set_progress_bar_config(disable=A )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase = self.get_inputs()
        lowerCamelCase = pipe(**A )
        lowerCamelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 66 |
def __lowerCamelCase ():
    """Project Euler 40: product of selected digits of Champernowne's constant.

    Champernowne's constant is formed by concatenating the decimal
    representations 1, 2, 3, ...  This returns
    ``d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000`` where ``dN`` is
    the N-th digit (1-based) of the fractional part.

    Returns:
        int: the product of the seven sampled digits (210 for this problem).
    """
    # BUGFIX: the original assigned both the accumulator list and the counter
    # to the same obfuscated name, then read the undefined names ``constant``
    # and ``i``.  Also stop as soon as 10**6 digits are available instead of
    # concatenating 10**6 whole numbers.
    parts: list[str] = []
    total_len = 0
    i = 1
    while total_len < 1_000_000:
        s = str(i)
        parts.append(s)
        total_len += len(s)
        i += 1
    constant = "".join(parts)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


# The ``__main__`` guard (and external callers) use the upstream name.
solution = __lowerCamelCase

if __name__ == "__main__":
    print(solution())
| 66 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_snake_case : Tuple = logging.get_logger(__name__)

# Map of canonical CodeGen checkpoint names to their hosted config.json URLs.
# NOTE(review): re-binding ``_snake_case`` here clobbers the logger above —
# an automated-renaming artifact; upstream uses two distinct module names.
_snake_case : Any = {
    'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
    'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
    'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
    'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
    'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
    'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
    'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
    'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
    'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
    'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
    'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
    'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class _UpperCAmelCase ( _UpperCamelCase ):
    """Configuration class for CodeGen models (sizes, dropouts, token ids).

    BUGFIX: the original bound both class attributes to the same name ``a_``
    (the second clobbering the first), gave every ``__init__`` parameter the
    same name (a SyntaxError as written), and assigned every hyper-parameter
    to a throwaway local instead of ``self``, leaving the config attributes
    unset.  Names below are restored from the body's attribute references and
    from the upstream ``CodeGenConfig``.
    """

    model_type = """codegen"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=5_0_4_0_0,
        n_positions=2_0_4_8,
        n_ctx=2_0_4_8,
        n_embd=4_0_9_6,
        n_layer=2_8,
        n_head=1_6,
        rotary_dim=6_4,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=5_0_2_5_6,
        eos_token_id=5_0_2_5_6,
        tie_word_embeddings=False,
        **kwargs,
    ) -> None:
        """Store every hyper-parameter on the instance and forward token ids to the base class."""
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class _UpperCAmelCase ( _UpperCamelCase ):
    """ONNX export configuration (OnnxConfigWithPast-style) for the CodeGen config above.

    NOTE(review): automated-renaming artifacts — locals are assigned to
    ``__lowerCAmelCase`` while later statements read the intended names
    (``common_inputs``, ``ordered_inputs``, ``batch``, ``seqlen``, ...), and
    every parameter is named ``lowerCAmelCase_``.  Confirm against the
    upstream module before relying on this code.
    """

    def __init__( self : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : str = "default" , lowerCAmelCase_ : List[PatchingSpec] = None , lowerCAmelCase_ : bool = False , ) -> str:
        """Forward to the base ONNX config; default ``pad_token_id`` to 0 when the wrapped config lacks one."""
        super().__init__(lowerCAmelCase_ , task=lowerCAmelCase_ , patching_specs=lowerCAmelCase_ , use_past=lowerCAmelCase_ )
        if not getattr(self._config , 'pad_token_id' , lowerCAmelCase_ ):
            # TODO: how to do that better?
            __lowerCAmelCase = 0

    @property
    def lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the ONNX inputs (extra past axes when ``use_past``)."""
        __lowerCAmelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
            __lowerCAmelCase = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            __lowerCAmelCase = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def lowercase ( self : Optional[Any] ) -> int:
        """Number of transformer layers, read from the wrapped config."""
        return self._config.n_layer

    @property
    def lowercase ( self : Any ) -> int:
        """Number of attention heads, read from the wrapped config."""
        return self._config.n_head

    def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : PreTrainedTokenizer , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Generate dummy ONNX inputs, appending zero-filled past_key_values and an extended mask when ``use_past``."""
        __lowerCAmelCase = super(lowerCAmelCase_ , self ).generate_dummy_inputs(
            lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
        # We need to order the input in the way they appears in the forward()
        __lowerCAmelCase = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch

                __lowerCAmelCase , __lowerCAmelCase = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                __lowerCAmelCase = seqlen + 2
                __lowerCAmelCase = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                __lowerCAmelCase = [
                    (torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers )
                ]
        __lowerCAmelCase = common_inputs['attention_mask']
        if self.use_past:
            __lowerCAmelCase = ordered_inputs['attention_mask'].dtype
            __lowerCAmelCase = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
        return ordered_inputs

    @property
    def lowercase ( self : Dict ) -> int:
        # Constant 13 — presumably the default ONNX opset for export; confirm upstream.
        return 1_3
| 284 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a_ ( config, input_ids, attention_mask=None, head_mask=None ):
    """Build the kwargs dict fed to TF-OPT models in the tests below.

    BUGFIX: the original repeated one parameter name four times (a SyntaxError
    as written) and assigned the computed mask to a throwaway local while
    returning the undefined name ``attention_mask``.  Parameter names are
    restored from the body's references; ``head_mask`` is accepted (and
    ignored) to keep the original 4-argument arity.
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        # NOTE(review): ``tf.inta`` looks like a mangled dtype name
        # (presumably ``tf.int8``/``tf.int32`` upstream) — confirm.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.inta )
    return {"input_ids": input_ids, "attention_mask": attention_mask}


# The tester class below calls this helper by its upstream name.
prepare_opt_inputs_dict = a_
@require_tf
class _UpperCAmelCase :
    """Tester helper for the TF-OPT suite: tiny OPTConfig, dummy inputs, shared checks.

    NOTE(review): automated-renaming artifacts — the three class attributes all
    bind the same name ``a_`` (later ones clobber earlier ones), ``__init__``
    repeats one parameter name (a SyntaxError as written), and locals are
    assigned to ``__lowerCAmelCase`` while later lines read the intended names.
    """

    a_ = OPTConfig
    a_ = {}
    a_ = """gelu"""

    def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=1_3 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Any=9_9 , lowerCAmelCase_ : Any=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Tuple=2_0 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_6 , ) -> int:
        """Store the test hyper-parameters on the tester instance."""
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = seq_length
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = eos_token_id
        __lowerCAmelCase = pad_token_id
        __lowerCAmelCase = bos_token_id
        __lowerCAmelCase = embed_dim
        __lowerCAmelCase = word_embed_proj_dim
        __lowerCAmelCase = False

    def lowercase ( self : List[str] ) -> Optional[Any]:
        """Build eos-terminated random input ids plus a matching tiny OPTConfig."""
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        __lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        __lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
        __lowerCAmelCase = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
        __lowerCAmelCase = prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
        return config, inputs_dict

    def lowercase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
        """Check that cached past_key_values reproduce the no-cache outputs on appended tokens."""
        __lowerCAmelCase = TFOPTModel(config=lowerCAmelCase_ )
        __lowerCAmelCase = inputs_dict['input_ids']
        __lowerCAmelCase = input_ids[:1, :]
        __lowerCAmelCase = inputs_dict['attention_mask'][:1, :]
        __lowerCAmelCase = 1
        # first forward pass
        __lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
        __lowerCAmelCase , __lowerCAmelCase = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        __lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
        __lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        __lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
        __lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        __lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        __lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
        __lowerCAmelCase = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 )
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Common-API tests (config checks, past-cache, embedding resize) for TF-OPT models.

    NOTE(review): automated-renaming artifacts — the class attributes all bind
    ``a_`` (clobbering each other), locals are assigned to ``__lowerCAmelCase``,
    and the ``zip`` loops below bind the same target name ``pa`` twice.
    """

    a_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    a_ = (TFOPTForCausalLM,) if is_tf_available() else ()
    a_ = (
        {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
    )
    a_ = False
    a_ = False
    a_ = False
    a_ = 10

    def lowercase ( self : List[str] ) -> Optional[int]:
        """Create the shared model tester and config tester."""
        __lowerCAmelCase = TFOPTModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ )

    def lowercase ( self : Tuple ) -> Tuple:
        """Run the generic configuration checks."""
        self.config_tester.run_common_tests()

    def lowercase ( self : Tuple ) -> Optional[Any]:
        """Run the shared past-key-values consistency check."""
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )

    def lowercase ( self : Union[str, Any] ) -> Dict:
        """Resize token embeddings up and down; check shapes and that old weights are preserved."""
        __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] ):
            # Return the embedding weight, building the model first if needed.
            if hasattr(lowerCAmelCase_ , 'weight' ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(lowerCAmelCase_ , 'weight' ):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
                # build the embeddings
                __lowerCAmelCase = model_class(config=lowerCAmelCase_ )
                __lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
                __lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(lowerCAmelCase_ )
                __lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
                __lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                __lowerCAmelCase = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
                # check that weights remain the same after resizing
                __lowerCAmelCase = True
                for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                        __lowerCAmelCase = False
                self.assertTrue(lowerCAmelCase_ )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
                    __lowerCAmelCase = True
                    for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
                            __lowerCAmelCase = False
                    self.assertTrue(lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
    """Wrap a (nested) token-id list in a constant tf tensor of integer dtype.

    BUGFIX: the integration test below calls this helper under its upstream
    name ``_long_tensor``, which was undefined; an alias is added.  Note that
    this second ``def a_`` also shadows the ``a_`` helper defined earlier in
    the module — an automated-renaming artifact.
    """
    # NOTE(review): ``tf.intaa`` looks like a mangled dtype name (presumably
    # ``tf.int32``/``tf.int64`` upstream) — confirm against the original file.
    return tf.constant(lowerCAmelCase_, dtype=tf.intaa )


_long_tensor = a_
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """Builds a tiny OPT config plus eos-terminated dummy input ids for head tests.

    NOTE(review): the class attribute ``a_ = 99`` is read back as
    ``self.vocab_size`` — the attribute name was mangled by automated renaming.
    """

    a_ = 99

    def lowercase ( self : Optional[int] ) -> Any:
        """Return (config, input_ids, batch_size) for a tiny OPT model."""
        __lowerCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
        __lowerCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        __lowerCAmelCase = input_ids.shape[0]
        __lowerCAmelCase = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """Integration test: forward pass of pretrained facebook/opt-350m, eager and under XLA."""

    @slow
    def lowercase ( self : str ) -> List[str]:
        """Check last_hidden_state shape and a 3x3 slice against reference values."""
        __lowerCAmelCase = TFOPTModel.from_pretrained('facebook/opt-350m' )
        __lowerCAmelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        __lowerCAmelCase = tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
        with tf.GradientTape():
            __lowerCAmelCase = model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
        __lowerCAmelCase = (1, 1_1, 5_1_2)
        self.assertEqual(output.shape , lowerCAmelCase_ )
        __lowerCAmelCase = tf.constant(
            [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
        # Repeat the forward pass jit-compiled and allow a looser tolerance.
        __lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
        __lowerCAmelCase = xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
    """Logit-level parity test of TF-OPT 350m against Metaseq reference means."""

    def lowercase ( self : int ) -> Dict:
        """Record the checkpoint path used by the test below."""
        super().setUp()
        __lowerCAmelCase = 'facebook/opt-350m'

    def lowercase ( self : Dict ) -> Any:
        """Compare per-position mean logits (eager and XLA) with reference values."""
        __lowerCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
        __lowerCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
        __lowerCAmelCase = [
            'Today is a beautiful day and I want to',
            'In the city of',
            'Paris is the capital of France and',
            'Computers and mobile phones have taken',
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        __lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
        __lowerCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        __lowerCAmelCase = tf.constant(
            [
                [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
                [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
                [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
                [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
            ] )
        self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
        # Repeat jit-compiled; results must match the same references.
        __lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
        __lowerCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
    """Greedy-generation integration tests for TF-OPT 125m/350m, including batched left-padding."""

    @property
    def lowercase ( self : Optional[int] ) -> int:
        """Fixed prompts shared by the generation tests."""
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def lowercase ( self : int ) -> str:
        """Generate up to 10 tokens with opt-125m and compare with expected continuations."""
        __lowerCAmelCase = 'facebook/opt-125m'
        __lowerCAmelCase = [
            'Today is a beautiful day and I want to',
            'In the city of New York, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        __lowerCAmelCase = []
        __lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
        __lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
        for prompt in self.prompts:
            __lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
            __lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
            __lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
            predicted_outputs += generated_string
        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )

    def lowercase ( self : Optional[Any] ) -> str:
        """Check that left-padded batched generation matches per-sentence generation."""
        __lowerCAmelCase = 'facebook/opt-350m'
        __lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
        __lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
        __lowerCAmelCase = 'left'
        # use different length sentences to test batching
        __lowerCAmelCase = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        __lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ )
        __lowerCAmelCase = inputs['input_ids']
        __lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs['attention_mask'] )
        __lowerCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
        __lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ )
        # Number of pad positions in the shorter sentence of the batch.
        __lowerCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
        __lowerCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
        __lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
        __lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
        __lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
        __lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
        __lowerCAmelCase = [
            'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
            'Today, I was in the middle of a conversation with a friend about the',
        ]
        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
        self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )

    def lowercase ( self : List[Any] ) -> List[Any]:
        """Generate up to 10 tokens with opt-350m and compare with expected continuations."""
        __lowerCAmelCase = 'facebook/opt-350m'
        __lowerCAmelCase = [
            'Today is a beautiful day and I want to',
            'In the city of San Francisco, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        __lowerCAmelCase = []
        __lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
        __lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
        for prompt in self.prompts:
            __lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
            __lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
            __lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
            predicted_outputs += generated_string
        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 284 | 1 |
import math
import random
def _UpperCamelCase (value: float, deriv: bool = False) -> float:
    """Logistic sigmoid, or its derivative expressed in terms of a sigmoid output.

    Args:
        value: the input, or — when ``deriv`` is True — a previously computed
            sigmoid activation.
        deriv: when True, return ``value * (1 - value)``, i.e. the sigmoid
            derivative evaluated at activation ``value``.

    Returns:
        ``1 / (1 + e**-value)``, or the derivative term described above.

    BUGFIX: the original gave both parameters the same obfuscated name (a
    SyntaxError as written) while the body read the undefined names ``value``
    and ``deriv``; the signature is restored from those references.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# ``forward_propagation`` below refers to this helper by its upstream name.
sigmoid_function = _UpperCamelCase
# Scaling constant applied to the weight before each sigmoid evaluation.
# (The original bound this to an obfuscated name that was never read back,
# leaving INITIAL_VALUE undefined.)
INITIAL_VALUE = 0.02


def _UpperCamelCase (expected: int, number_propagations: int) -> float:
    """Train a single weight toward ``expected`` (a 0-100 value) and return the final activation * 100.

    Args:
        expected: target value on a 0-100 scale.
        number_propagations: number of forward/backward iterations to run.

    Returns:
        The final layer activation rescaled to the 0-100 range.

    BUGFIX: the original assigned every local to the same obfuscated name and
    passed its own function parameters into ``sigmoid_function`` for the
    derivative step; locals and call arguments are restored from the body's
    reads and the upstream single-neuron example.

    NOTE(review): the weight starts from ``random.randint``, so results vary
    between calls unless ``random`` is seeded; with ``number_propagations == 0``
    the return raises ``NameError`` (``layer_1`` unbound), as upstream.
    """
    # Random odd initial weight in [-99, 99].
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


# The ``__main__`` guard refers to this function by its upstream name.
forward_propagation = _UpperCamelCase
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # BUGFIX: the original assigned both inputs to the same obfuscated name
    # and then read the undefined names ``expected``/``number_propagations``.
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 360 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    """Builds tiny GPT-NeoX configs/inputs and runs shape checks for each head.

    Each ``create_and_check_*`` helper instantiates a model on ``torch_device``,
    runs a forward pass in eval mode, and asserts output shapes via
    ``self.parent`` (the driving ``unittest.TestCase``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Reserve the last vocab id as the padding token.
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return a (config, input_ids, input_mask, token_labels) tuple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        """Build a small GPTNeoXConfig from the tester's dimensions."""
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs, but with a decoder config."""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        # Forward once with and once without the attention mask.
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        """Check that cached (past_key_values) decoding matches a full forward pass."""
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite and pipeline wiring for the GPT-NeoX model family.

    NOTE(review): the mixin base classes were mangled in the original
    (``_a , _a , _a``); restored to the standard transformers trio — confirm
    against the sibling test files' imports.
    """

    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Features not supported / not implemented for GPT-NeoX.
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        """Compare hidden states of a vanilla model vs. one with RoPE scaling."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        """Greedy-generate from a Pythia checkpoint, with and without gradient checkpointing."""
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
| 87 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.